Merge branch 'master' of git://1984.lsi.us.es/nf-next
author: David S. Miller <davem@davemloft.net>
Tue, 19 Feb 2013 04:42:09 +0000 (23:42 -0500)
committer: David S. Miller <davem@davemloft.net>
Tue, 19 Feb 2013 04:42:09 +0000 (23:42 -0500)
Pablo Neira Ayuso says:

====================
The following patchset contains updates for your net-next tree; they are:

* Fix (for just added) connlabel dependencies, from Florian Westphal.

* Add aliasing support for conntrack, thus users can either use -m state
  or -m conntrack from iptables while using the same kernel module, from
  Jozsef Kadlecsik.

* Some code refactoring for the CT target to merge common code in
  revision 0 and 1, from myself.

* Add aliasing support for CT, based on patch from Jozsef Kadlecsik.

* Add one mutex per nfnetlink subsystem, from myself.

* Improved logging for packets that are dropped by helpers, from myself.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1009 files changed:
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/hid/hid-sensor.txt [changed mode: 0755->0644]
Documentation/kernel-parameters.txt
Documentation/networking/LICENSE.qlcnic
Documentation/networking/ip-sysctl.txt
Documentation/x86/boot.txt
Documentation/x86/zero-page.txt
MAINTAINERS
Makefile
arch/arm/common/gic.c
arch/arm/include/asm/memory.h
arch/arm/include/asm/smp_scu.h
arch/arm/kernel/smp_scu.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-highbank/highbank.c
arch/arm/mach-highbank/sysregs.h
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-realview/include/mach/irqs-eb.h
arch/arm/mm/dma-mapping.c
arch/arm/net/bpf_jit_32.c
arch/avr32/include/asm/dma-mapping.h
arch/blackfin/include/asm/dma-mapping.h
arch/c6x/include/asm/dma-mapping.h
arch/cris/include/asm/dma-mapping.h
arch/frv/include/asm/dma-mapping.h
arch/m68k/include/asm/dma-mapping.h
arch/m68k/include/asm/processor.h
arch/mips/bcm47xx/Kconfig
arch/mips/cavium-octeon/executive/cvmx-l2c.c
arch/mips/include/asm/dsp.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/mach-pnx833x/war.h
arch/mips/include/asm/pgtable-64.h
arch/mips/include/uapi/asm/Kbuild
arch/mips/include/uapi/asm/break.h [moved from arch/mips/include/asm/break.h with 100% similarity]
arch/mips/kernel/ftrace.c
arch/mips/kernel/mcount.S
arch/mips/kernel/vpe.c
arch/mips/lantiq/irq.c
arch/mips/lib/delay.c
arch/mips/mm/ioremap.c
arch/mips/mm/mmap.c
arch/mips/netlogic/xlr/setup.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mn10300/include/asm/dma-mapping.h
arch/parisc/include/asm/dma-mapping.h
arch/powerpc/mm/hash_low_64.S
arch/s390/include/asm/pgtable.h
arch/s390/kernel/time.c
arch/tile/Kconfig
arch/tile/include/asm/io.h
arch/tile/include/asm/irqflags.h
arch/tile/include/uapi/arch/interrupts_32.h
arch/tile/include/uapi/arch/interrupts_64.h
arch/tile/kernel/intvec_64.S
arch/tile/kernel/process.c
arch/tile/kernel/reboot.c
arch/tile/kernel/setup.c
arch/tile/kernel/stack.c
arch/tile/lib/cacheflush.c
arch/tile/lib/cpumask.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/header.S
arch/x86/boot/setup.ld
arch/x86/boot/tools/build.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/efi.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/uv/uv.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/include/uapi/asm/mce.h
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/head_32.S
arch/x86/kernel/msr.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/tools/insn_sanity.c
arch/x86/tools/relocs.c
arch/xtensa/include/asm/dma-mapping.h
block/genhd.c
drivers/acpi/osl.c
drivers/atm/iphase.h
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon_nflash.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/driver_gpio.c
drivers/bcma/driver_mips.c
drivers/bcma/main.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_state.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/char/virtio_console.c
drivers/connector/connector.c
drivers/edac/edac_mc.c
drivers/edac/edac_pci_sysfs.c
drivers/firmware/dmi_scan.c
drivers/firmware/efivars.c
drivers/firmware/iscsi_ibft_find.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_connector.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/core/core/falcon.c
drivers/gpu/drm/nouveau/core/core/subdev.c
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/subdev/fb/base.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/reg_srcs/cayman
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/input/joystick/analog.c
drivers/iommu/intel-iommu.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/media/radio/radio-keene.c
drivers/media/radio/radio-si4713.c
drivers/media/radio/radio-wl1273.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/mtd/devices/Kconfig
drivers/mtd/maps/physmap_of.c
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/nand_base.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/c_can/c_can.c
drivers/net/can/usb/ems_usb.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000/e1000_param.c
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/80003es2lan.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/82571.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/mac.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/manage.c
drivers/net/ethernet/intel/e1000e/manage.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/phy.h [new file with mode: 0644]
drivers/net/ethernet/intel/e1000e/regs.h [new file with mode: 0644]
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_cpdma.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/ieee802154/at86rf230.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppoe.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/rx.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/cfg80211.h
drivers/net/wireless/ath/ath6kl/core.h
drivers/net/wireless/ath/ath6kl/htc_pipe.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath6kl/wmi.h
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/fwil.c
drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/p2p.c [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/p2p.h [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
drivers/net/wireless/brcm80211/brcmsmac/channel.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlegacy/4965.c
drivers/net/wireless/iwlegacy/commands.h
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/calib.c
drivers/net/wireless/iwlwifi/dvm/calib.h
drivers/net/wireless/iwlwifi/dvm/commands.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/devices.c
drivers/net/wireless/iwlwifi/dvm/led.c
drivers/net/wireless/iwlwifi/dvm/led.h
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/dvm/power.h
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rs.h
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/testmode.c
drivers/net/wireless/iwlwifi/dvm/tt.c
drivers/net/wireless/iwlwifi/dvm/tt.h
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-agn-hw.h
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-devtrace.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-io.h
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-notif-wait.c
drivers/net/wireless/iwlwifi/iwl-notif-wait.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-phy-db.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-test.c
drivers/net/wireless/iwlwifi/iwl-test.h
drivers/net/wireless/iwlwifi/iwl-testmode.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/binding.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/d3.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/debugfs.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/led.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/mac80211.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/mvm.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/nvm.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/ops.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/power.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/quota.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/rs.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/rs.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/rx.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/scan.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/sta.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/sta.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/time-event.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/time-event.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tx.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/utils.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/pcie/1000.c
drivers/net/wireless/iwlwifi/pcie/2000.c
drivers/net/wireless/iwlwifi/pcie/5000.c
drivers/net/wireless/iwlwifi/pcie/6000.c
drivers/net/wireless/iwlwifi/pcie/7000.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/pcie/cfg.h
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/README
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/scan.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/rc.c
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/usb.h
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/Kconfig
drivers/net/wireless/ti/Makefile
drivers/net/wireless/ti/wilink_platform_data.c [moved from drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c with 100% similarity]
drivers/net/wireless/ti/wl1251/Kconfig
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl12xx/Makefile
drivers/net/wireless/ti/wl12xx/cmd.c
drivers/net/wireless/ti/wl12xx/cmd.h
drivers/net/wireless/ti/wl12xx/event.c [new file with mode: 0644]
drivers/net/wireless/ti/wl12xx/event.h [new file with mode: 0644]
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl12xx/scan.c [new file with mode: 0644]
drivers/net/wireless/ti/wl12xx/scan.h [new file with mode: 0644]
drivers/net/wireless/ti/wl12xx/wl12xx.h
drivers/net/wireless/ti/wl18xx/Makefile
drivers/net/wireless/ti/wl18xx/acx.c
drivers/net/wireless/ti/wl18xx/acx.h
drivers/net/wireless/ti/wl18xx/cmd.c [new file with mode: 0644]
drivers/net/wireless/ti/wl18xx/cmd.h [new file with mode: 0644]
drivers/net/wireless/ti/wl18xx/conf.h
drivers/net/wireless/ti/wl18xx/event.c [new file with mode: 0644]
drivers/net/wireless/ti/wl18xx/event.h [new file with mode: 0644]
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/scan.c [new file with mode: 0644]
drivers/net/wireless/ti/wl18xx/scan.h [new file with mode: 0644]
drivers/net/wireless/ti/wl18xx/tx.c
drivers/net/wireless/ti/wl18xx/wl18xx.h
drivers/net/wireless/ti/wlcore/Kconfig
drivers/net/wireless/ti/wlcore/Makefile
drivers/net/wireless/ti/wlcore/acx.c
drivers/net/wireless/ti/wlcore/acx.h
drivers/net/wireless/ti/wlcore/boot.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/conf.h
drivers/net/wireless/ti/wlcore/debugfs.c
drivers/net/wireless/ti/wlcore/event.c
drivers/net/wireless/ti/wlcore/event.h
drivers/net/wireless/ti/wlcore/hw_ops.h
drivers/net/wireless/ti/wlcore/init.c
drivers/net/wireless/ti/wlcore/io.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/rx.h
drivers/net/wireless/ti/wlcore/scan.c
drivers/net/wireless/ti/wlcore/scan.h
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/microread/Kconfig [new file with mode: 0644]
drivers/nfc/microread/Makefile [new file with mode: 0644]
drivers/nfc/microread/i2c.c [new file with mode: 0644]
drivers/nfc/microread/mei.c [new file with mode: 0644]
drivers/nfc/microread/microread.c [new file with mode: 0644]
drivers/nfc/microread/microread.h [new file with mode: 0644]
drivers/nfc/pn533.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/mvebu/pinctrl-dove.c
drivers/pinctrl/mvebu/pinctrl-kirkwood.c
drivers/pinctrl/pinctrl-exynos5440.c
drivers/pinctrl/pinctrl-mxs.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-sirf.c
drivers/platform/x86/ibm_rtl.c
drivers/platform/x86/samsung-laptop.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/max77686.c
drivers/regulator/max8907-regulator.c
drivers/regulator/max8997.c
drivers/regulator/max8998.c
drivers/regulator/of_regulator.c
drivers/regulator/s2mps11.c
drivers/regulator/tps65217-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/tps80031-regulator.c
drivers/rtc/rtc-isl1208.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-vt8500.c
drivers/scsi/isci/init.c
drivers/ssb/driver_gpio.c
drivers/ssb/driver_mipscore.c
drivers/ssb/main.c
drivers/ssb/ssb_private.h
drivers/staging/wlan-ng/cfg80211.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-timer.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/uhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/initializers.c
drivers/usb/storage/initializers.h
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/usb.c
drivers/usb/storage/usual-tables.c
drivers/vhost/net.c
drivers/vhost/tcm_vhost.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/xen/events.c
drivers/xen/xen-pciback/pciback_ops.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_map.c
fs/btrfs/file.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/scrub.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/dlm/user.c
fs/nfs/namespace.c
fs/nfs/nfs4client.c
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nilfs2/ioctl.c
fs/proc/proc_net.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_trace.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bcma/bcma_driver_mips.h
include/linux/efi.h
include/linux/ieee80211.h
include/linux/if_macvlan.h
include/linux/if_team.h
include/linux/in6.h
include/linux/llist.h
include/linux/memcontrol.h
include/linux/mlx4/device.h
include/linux/mmu_notifier.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netpoll.h
include/linux/pci_ids.h
include/linux/platform_data/cpsw.h
include/linux/platform_data/microread.h [new file with mode: 0644]
include/linux/proc_fs.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/ssb/ssb_driver_gige.h
include/linux/ssb/ssb_driver_mips.h
include/linux/tcp.h
include/linux/usb.h
include/linux/usb/hcd.h
include/linux/usb/usbnet.h
include/linux/wl12xx.h
include/net/act_api.h
include/net/bluetooth/a2mp.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/cfg80211.h
include/net/dst.h
include/net/mac80211.h
include/net/mrp.h [new file with mode: 0644]
include/net/neighbour.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sctp/constants.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/net/transp_v6.h
include/net/xfrm.h
include/uapi/linux/auto_fs.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_ether.h
include/uapi/linux/if_vlan.h
include/uapi/linux/neighbour.h
include/uapi/linux/nl80211.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/tcp.h
include/uapi/linux/usb/ch9.h
include/uapi/linux/vm_sockets.h [new file with mode: 0644]
init/main.c
kernel/events/core.c
kernel/pid.c
kernel/printk.c
kernel/rcutree_plugin.h
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/smp.c
kernel/sysctl_binary.c
lib/digsig.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/migrate.c
mm/mlock.c
mm/mmap.c
mm/page_alloc.c
net/802/Kconfig
net/802/Makefile
net/802/mrp.c [new file with mode: 0644]
net/8021q/Kconfig
net/8021q/Makefile
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_mvrp.c [new file with mode: 0644]
net/8021q/vlan_netlink.c
net/8021q/vlanproc.c
net/Kconfig
net/Makefile
net/atm/proc.c
net/ax25/af_ax25.c
net/batman-adv/distributed-arp-table.c
net/bluetooth/a2mp.c
net/bluetooth/af_bluetooth.c
net/bluetooth/amp.c
net/bluetooth/bnep/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sysfs.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bridge/Kconfig
net/bridge/Makefile
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/bridge/br_stp_if.c
net/bridge/br_sysfs_br.c
net/bridge/br_vlan.c [new file with mode: 0644]
net/can/bcm.c
net/can/proc.c
net/core/Makefile
net/core/datagram.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/dev_ioctl.c [new file with mode: 0644]
net/core/ethtool.c
net/core/netpoll.c
net/core/netprio_cgroup.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/probe.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/ieee802154/6lowpan.c
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/fib_trie.c
net/ipv4/gre.c
net/ipv4/igmp.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/protocol.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/udp.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/anycast.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6t_NPT.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udp_offload.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debug.h
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mesh_ps.c [new file with mode: 0644]
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/pm.c
net/mac80211/rate.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel.h
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_minstrel_ht.h
net/mac80211/rc80211_minstrel_ht_debugfs.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tkip.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wme.c
net/mac80211/wpa.c
net/mac802154/wpan.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/x_tables.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/llcp/llcp.c
net/openvswitch/datapath.c
net/openvswitch/vport-netdev.c
net/packet/af_packet.c
net/phonet/pn_dev.c
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/sched/act_ipt.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_tbf.c
net/sctp/Kconfig
net/sctp/auth.c
net/sctp/endpointola.c
net/sctp/ipv6.c
net/sctp/probe.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/socket.c
net/sunrpc/sched.c
net/sunrpc/svcsock.c
net/tipc/bcast.c
net/tipc/socket.c
net/unix/af_unix.c
net/vmw_vsock/Kconfig [new file with mode: 0644]
net/vmw_vsock/Makefile [new file with mode: 0644]
net/vmw_vsock/af_vsock.c [new file with mode: 0644]
net/vmw_vsock/af_vsock.h [new file with mode: 0644]
net/vmw_vsock/vmci_transport.c [new file with mode: 0644]
net/vmw_vsock/vmci_transport.h [new file with mode: 0644]
net/vmw_vsock/vmci_transport_notify.c [new file with mode: 0644]
net/vmw_vsock/vmci_transport_notify.h [new file with mode: 0644]
net/vmw_vsock/vmci_transport_notify_qstate.c [new file with mode: 0644]
net/vmw_vsock/vsock_addr.c [new file with mode: 0644]
net/vmw_vsock/vsock_addr.h [new file with mode: 0644]
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/ibss.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/scan.c
net/wireless/sme.c
net/wireless/sysfs.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-proc.c
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_proc.c
samples/seccomp/Makefile
scripts/checkpatch.pl
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/arizona.c
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/imx-pcm-dma.c
sound/soc/fsl/imx-pcm-fiq.c
sound/soc/fsl/imx-pcm.c
sound/soc/fsl/imx-pcm.h
sound/soc/soc-dapm.c
sound/usb/mixer.c
tools/vm/.gitignore [new file with mode: 0644]

index 6ddd028..ecfdf75 100644 (file)
@@ -24,6 +24,8 @@ Required properties:
 Optional properties:
 - ti,hwmods            : Must be "cpgmac0"
 - no_bd_ram            : Must be 0 or 1
+- dual_emac            : Specifies Switch to act as Dual EMAC
+- dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
old mode 100755 (executable)
new mode 100644 (file)
index 363e348..6c72381 100644 (file)
@@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        real-time workloads.  It can also improve energy
                        efficiency for asymmetric multiprocessors.
 
-       rcu_nocbs_poll  [KNL,BOOT]
+       rcu_nocb_poll   [KNL,BOOT]
                        Rather than requiring that offloaded CPUs
                        (specified by rcu_nocbs= above) explicitly
                        awaken the corresponding "rcuoN" kthreads,
index e7fb2c6..2ae3b64 100644 (file)
@@ -1,4 +1,4 @@
-Copyright (c) 2009-2011 QLogic Corporation
+Copyright (c) 2009-2013 QLogic Corporation
 QLogic Linux qlcnic NIC Driver
 
 You may modify and redistribute the device driver code under the
index 19ac180..dc2dc87 100644 (file)
@@ -130,17 +130,6 @@ somaxconn - INTEGER
        Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
        for TCP sockets.
 
-tcp_abc - INTEGER
-       Controls Appropriate Byte Count (ABC) defined in RFC3465.
-       ABC is a way of increasing congestion window (cwnd) more slowly
-       in response to partial acknowledgments.
-       Possible values are:
-               0 increase cwnd once per acknowledgment (no ABC)
-               1 increase cwnd once per acknowledgment of full sized segment
-               2 allow increase cwnd by two if acknowledgment is
-                 of two segments to compensate for delayed acknowledgments.
-       Default: 0 (off)
-
 tcp_abort_on_overflow - BOOLEAN
        If listening service is too slow to accept new connections,
        reset them. Default state is FALSE. It means that if overflow
index 406d82d..b443f1d 100644 (file)
@@ -57,6 +57,10 @@ Protocol 2.10:       (Kernel 2.6.31) Added a protocol for relaxed alignment
 Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
                protocol entry point.
 
+Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
+               to struct boot_params for for loading bzImage and ramdisk
+               above 4G in 64bit.
+
 **** MEMORY LAYOUT
 
 The traditional memory map for the kernel loader, used for Image or
@@ -182,7 +186,7 @@ Offset      Proto   Name            Meaning
 0230/4 2.05+   kernel_alignment Physical addr alignment required for kernel
 0234/1 2.05+   relocatable_kernel Whether kernel is relocatable or not
 0235/1 2.10+   min_alignment   Minimum alignment, as a power of two
-0236/2 N/A     pad3            Unused
+0236/2 2.12+   xloadflags      Boot protocol option flags
 0238/4 2.06+   cmdline_size    Maximum size of the kernel command line
 023C/4 2.07+   hardware_subarch Hardware subarchitecture
 0240/8 2.07+   hardware_subarch_data Subarchitecture-specific data
@@ -386,6 +390,7 @@ Protocol:   2.00+
        F  Special              (0xFF = undefined)
        10  Reserved
        11  Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de>
+       12  OVMF UEFI virtualization stack
 
   Please contact <hpa@zytor.com> if you need a bootloader ID
   value assigned.
@@ -582,6 +587,27 @@ Protocol:  2.10+
   misaligned kernel.  Therefore, a loader should typically try each
   power-of-two alignment from kernel_alignment down to this alignment.
 
+Field name:     xloadflags
+Type:           read
+Offset/size:    0x236/2
+Protocol:       2.12+
+
+  This field is a bitmask.
+
+  Bit 0 (read):        XLF_KERNEL_64
+       - If 1, this kernel has the legacy 64-bit entry point at 0x200.
+
+  Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G
+        - If 1, kernel/boot_params/cmdline/ramdisk can be above 4G.
+
+  Bit 2 (read):        XLF_EFI_HANDOVER_32
+       - If 1, the kernel supports the 32-bit EFI handoff entry point
+          given at handover_offset.
+
+  Bit 3 (read): XLF_EFI_HANDOVER_64
+       - If 1, the kernel supports the 64-bit EFI handoff entry point
+          given at handover_offset + 0x200.
+
 Field name:    cmdline_size
 Type:          read
 Offset/size:   0x238/4
index cf5437d..199f453 100644 (file)
@@ -19,6 +19,9 @@ Offset        Proto   Name            Meaning
 090/010        ALL     hd1_info        hd1 disk parameter, OBSOLETE!!
 0A0/010        ALL     sys_desc_table  System description table (struct sys_desc_table)
 0B0/010        ALL     olpc_ofw_header OLPC's OpenFirmware CIF and friends
+0C0/004        ALL     ext_ramdisk_image ramdisk_image high 32bits
+0C4/004        ALL     ext_ramdisk_size  ramdisk_size high 32bits
+0C8/004        ALL     ext_cmd_line_ptr  cmd_line_ptr high 32bits
 140/080        ALL     edid_info       Video mode setup (struct edid_info)
 1C0/020        ALL     efi_info        EFI 32 information (struct efi_info)
 1E0/004        ALL     alk_mem_k       Alternative mem check, in KB
@@ -27,6 +30,7 @@ Offset        Proto   Name            Meaning
 1E9/001        ALL     eddbuf_entries  Number of entries in eddbuf (below)
 1EA/001        ALL     edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
                                (below)
+1EF/001        ALL     sentinel        Used to detect broken bootloaders
 290/040        ALL     edd_mbr_sig_buffer EDD MBR signatures
 2D0/A00        ALL     e820_map        E820 memory map table
                                (array of struct e820entry)
index b5ab4d9..8bbd949 100644 (file)
@@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE
 M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
 M:     Hans-Christian Egtvedt <egtvedt@samfundet.no>
 W:     http://www.atmel.com/products/AVR32/
-W:     http://avr32linux.org/
+W:     http://mirror.egtvedt.no/avr32linux.org/
 W:     http://avrfreaks.net/
 S:     Maintained
 F:     arch/avr32/
@@ -7076,7 +7076,7 @@ F:        include/uapi/sound/
 F:     sound/
 
 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
-M:     Liam Girdwood <lrg@ti.com>
+M:     Liam Girdwood <lgirdwood@gmail.com>
 M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -7524,7 +7524,7 @@ S:        Maintained
 F:     drivers/media/tuners/tea5767.*
 
 TEAM DRIVER
-M:     Jiri Pirko <jpirko@redhat.com>
+M:     Jiri Pirko <jiri@resnulli.us>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/team/
index 2d3c92c..08ef9bd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Terrified Chipmunk
+EXTRAVERSION = -rc7
+NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 36ae03a..87dfa90 100644 (file)
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+       void __iomem *base = gic_data_dist_base(gic);
+       u32 mask, i;
+
+       for (i = mask = 0; i < 32; i += 4) {
+               mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+               mask |= mask >> 16;
+               mask |= mask >> 8;
+               if (mask)
+                       break;
+       }
+
+       if (!mask)
+               pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+       return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
        unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
        /*
         * Set all global interrupts to this CPU only.
         */
-       cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+       cpumask = gic_get_cpumask(gic);
+       cpumask |= cpumask << 8;
+       cpumask |= cpumask << 16;
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
         * Get what the GIC says our CPU mask is.
         */
        BUG_ON(cpu >= NR_GIC_CPU_IF);
-       cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+       cpu_mask = gic_get_cpumask(gic);
        gic_cpu_map[cpu] = cpu_mask;
 
        /*
index 73cf03a..1c4df27 100644 (file)
@@ -37,7 +37,7 @@
  */
 #define PAGE_OFFSET            UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE     (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE     ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
index 4eb6d00..86dff32 100644 (file)
@@ -7,8 +7,14 @@
 
 #ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
 #endif
 
 #endif
index b9f015e..45eac87 100644 (file)
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
        unsigned int val;
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
        if (mode > 3 || mode == 1 || cpu > 3)
                return -EINVAL;
index e103c29..85afb03 100644 (file)
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
        select CPU_EXYNOS4210
        select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
        select PINCTRL
-       select PINCTRL_EXYNOS4
+       select PINCTRL_EXYNOS
        select USE_OF
        help
          Machine support for Samsung Exynos4 machine with device tree enabled.
index 981dc1e..e6c0612 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-       cpu = cpu_logical_map(cpu);
+       cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
        writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
index 70af9d1..5995df7 100644 (file)
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
 
 static inline void highbank_set_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
        else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
 
 static inline void highbank_clear_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
        else
index 3985f35..a4ca63b 100644 (file)
@@ -309,7 +309,7 @@ static struct omap2_hsmmc_info mmc[] = {
                .gpio_wp        = 63,
                .deferred       = true,
        },
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
        {
                .name           = "wl1271",
                .mmc            = 2,
@@ -450,7 +450,7 @@ static struct regulator_init_data omap3evm_vio = {
        .consumer_supplies      = omap3evm_vio_supply,
 };
 
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
 
 #define OMAP3EVM_WLAN_PMENA_GPIO       (150)
 #define OMAP3EVM_WLAN_IRQ_GPIO         (149)
@@ -563,7 +563,7 @@ static struct omap_board_mux omap35x_board_mux[] __initdata = {
                                OMAP_PIN_OFF_NONE),
        OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
                                OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
        /* WLAN IRQ - GPIO 149 */
        OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
 
@@ -601,7 +601,7 @@ static struct omap_board_mux omap36x_board_mux[] __initdata = {
        OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
        OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
        OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
        /* WLAN IRQ - GPIO 149 */
        OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
 
@@ -637,7 +637,7 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {
 
 static void __init omap3_evm_wl12xx_init(void)
 {
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
        int ret;
 
        /* WL12xx WLAN Init */
index d6b5073..4475423 100644 (file)
 /*
  * Only define NR_IRQS if less than NR_IRQS_EB
  */
-#define NR_IRQS_EB             (IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB             (IRQ_EB_GIC_START + 128)
 
 #if defined(CONFIG_MACH_REALVIEW_EB) \
        && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
index 076c26d..dda3904 100644 (file)
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        if (is_coherent || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (gfp & GFP_ATOMIC)
+       else if (!(gfp & __GFP_WAIT))
                addr = __alloc_from_pool(size, &page);
        else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
index a34f1e2..6828ef6 100644 (file)
@@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 
 static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
 {
-       emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
-       emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
-       emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
-       emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
+       /* r_dst = (r_src << 8) | (r_src >> 8) */
+       emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
+       emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
+
+       /*
+        * we need to mask out the bits set in r_dst[23:16] due to
+        * the first shift instruction.
+        *
+        * note that 0x8ff is the encoded immediate 0x00ff0000.
+        */
+       emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
 }
 
 #else  /* ARMv6+ */
index aaf5199..b3d18f9 100644 (file)
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
index bbf4610..054d9ec 100644 (file)
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        _dma_sync((dma_addr_t)vaddr, size, dir);
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif                         /* _BLACKFIN_DMA_MAPPING_H */
index 3c69406..88bd0d8 100644 (file)
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
 #define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif /* _ASM_C6X_DMA_MAPPING_H */
index 8588b2c..2f0f654 100644 (file)
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 
 #endif
index dfb8110..1746a2b 100644 (file)
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        flush_write_buffers();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif  /* _ASM_DMA_MAPPING_H */
index 3e6b844..292805f 100644 (file)
@@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif  /* _M68K_DMA_MAPPING_H */
index ae700f4..b0768a6 100644 (file)
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
 #define start_thread(_regs, _pc, _usp)                  \
 do {                                                    \
        (_regs)->pc = (_pc);                            \
-       ((struct switch_stack *)(_regs))[-1].a6 = 0;    \
        setframeformat(_regs);                          \
        if (current->mm)                                \
                (_regs)->d5 = current->mm->start_data;  \
index d7af29f..ba61192 100644 (file)
@@ -8,8 +8,10 @@ config BCM47XX_SSB
        select SSB_DRIVER_EXTIF
        select SSB_EMBEDDED
        select SSB_B43_PCI_BRIDGE if PCI
+       select SSB_DRIVER_PCICORE if PCI
        select SSB_PCICORE_HOSTMODE if PCI
        select SSB_DRIVER_GPIO
+       select GPIOLIB
        default y
        help
         Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -25,6 +27,7 @@ config BCM47XX_BCMA
        select BCMA_HOST_PCI if PCI
        select BCMA_DRIVER_PCI_HOSTMODE if PCI
        select BCMA_DRIVER_GPIO
+       select GPIOLIB
        default y
        help
         Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
index 9f883bf..33b7214 100644 (file)
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/compiler.h>
 #include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
  */
 static void fault_in(uint64_t addr, int len)
 {
-       volatile char *ptr;
-       volatile char dummy;
+       char *ptr;
+
        /*
         * Adjust addr and length so we get all cache lines even for
         * small ranges spanning two cache lines.
         */
        len += addr & CVMX_CACHE_LINE_MASK;
        addr &= ~CVMX_CACHE_LINE_MASK;
-       ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+       ptr = cvmx_phys_to_ptr(addr);
        /*
         * Invalidate L1 cache to make sure all loads result in data
         * being in L2.
         */
        CVMX_DCACHE_INVALIDATE;
        while (len > 0) {
-               dummy += *ptr;
+               ACCESS_ONCE(*ptr);
                len -= CVMX_CACHE_LINE_SIZE;
                ptr += CVMX_CACHE_LINE_SIZE;
        }
index e9bfc08..7bfad05 100644 (file)
@@ -16,7 +16,7 @@
 #include <asm/mipsregs.h>
 
 #define DSP_DEFAULT    0x00000000
-#define DSP_MASK       0x3ff
+#define DSP_MASK       0x3f
 
 #define __enable_dsp_hazard()                                          \
 do {                                                                   \
index ab84064..33c34ad 100644 (file)
@@ -353,6 +353,7 @@ union mips_instruction {
        struct u_format u_format;
        struct c_format c_format;
        struct r_format r_format;
+       struct p_format p_format;
        struct f_format f_format;
        struct ma_format ma_format;
        struct b_format b_format;
index edaa06d..e410df4 100644 (file)
@@ -21,4 +21,4 @@
 #define R10000_LLSC_WAR                        0
 #define MIPS34K_MISSED_ITLB_WAR                0
 
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
index c631910..013d5f7 100644 (file)
@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
 #else
 #define pte_pfn(x)             ((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)     __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)     __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 
 #define __pgd_offset(address)  pgd_index(address)
index a1a0452..77d4fb3 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 header-y += auxvec.h
 header-y += bitsperlong.h
+header-y += break.h
 header-y += byteorder.h
 header-y += cachectl.h
 header-y += errno.h
index 6a2d758..83fa146 100644 (file)
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+       ftrace_modify_all_code(command);
+}
+
 /*
  * Check if the address is in kernel space
  *
@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
        return 0;
 }
 
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+                               unsigned int new_code2)
+{
+       int faulted;
+
+       safe_store_code(new_code1, ip, faulted);
+       if (unlikely(faulted))
+               return -EFAULT;
+       ip += 4;
+       safe_store_code(new_code2, ip, faulted);
+       if (unlikely(faulted))
+               return -EFAULT;
+       flush_icache_range(ip, ip + 8); /* original ip + 12 */
+       return 0;
+}
+#endif
+
 /*
  * The details about the calling site of mcount on MIPS
  *
@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
         * needed.
         */
        new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
-
+#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
+#else
+       /*
+        * On 32 bit MIPS platforms, gcc adds a stack adjust
+        * instruction in the delay slot after the branch to
+        * mcount and expects mcount to restore the sp on return.
+        * This is based on a legacy API and does nothing but
+        * waste instructions so it's being removed at runtime.
+        */
+       return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
index 4c968e7..1658676 100644 (file)
@@ -46,9 +46,8 @@
        PTR_L   a5, PT_R9(sp)
        PTR_L   a6, PT_R10(sp)
        PTR_L   a7, PT_R11(sp)
-       PTR_ADDIU       sp, PT_SIZE
 #else
-       PTR_ADDIU       sp, (PT_SIZE + 8)
+       PTR_ADDIU       sp, PT_SIZE
 #endif
 .endm
 
@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
        .globl _mcount
 _mcount:
        b       ftrace_stub
-        nop
+       addiu sp,sp,8
+
+       /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
        lw      t1, function_trace_stop
        bnez    t1, ftrace_stub
         nop
index eec690a..147cec1 100644 (file)
@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)
 
                        printk(KERN_WARNING
                               "VPE loader: TC %d is already in use.\n",
-                               t->index);
+                              v->tc->index);
                        return -ENOEXEC;
                }
        } else {
index f36acd1..a7935bf 100644 (file)
@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 #endif
 
        /* tell oprofile which irq to use */
-       cp0_perfcount_irq = LTQ_PERF_IRQ;
+       cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
        /*
         * if the timer irq is not one of the mips irqs we need to
index dc81ca8..288f795 100644 (file)
@@ -21,7 +21,7 @@ void __delay(unsigned long loops)
        "       .set    noreorder                               \n"
        "       .align  3                                       \n"
        "1:     bnez    %0, 1b                                  \n"
-#if __SIZEOF_LONG__ == 4
+#if BITS_PER_LONG == 32
        "       subu    %0, 1                                   \n"
 #else
        "       dsubu   %0, 1                                   \n"
index 7657fd2..cacfd31 100644 (file)
@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)
 
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(__iounmap);
-
-int __virt_addr_valid(const volatile void *kaddr)
-{
-       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
-}
-EXPORT_SYMBOL_GPL(__virt_addr_valid);
index d9be754..7e5fe27 100644 (file)
@@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 
        return ret;
 }
+
+int __virt_addr_valid(const volatile void *kaddr)
+{
+       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
index 4e7f49d..c5ce699 100644 (file)
@@ -193,8 +193,11 @@ static void nlm_init_node(void)
 
 void __init prom_init(void)
 {
-       int i, *argv, *envp;            /* passed as 32 bit ptrs */
+       int *argv, *envp;               /* passed as 32 bit ptrs */
        struct psb_info *prom_infop;
+#ifdef CONFIG_SMP
+       int i;
+#endif
 
        /* truncate to 32 bit and sign extend all args */
        argv = (int *)(long)(int)fw_arg1;
index 1552522..6eaa4f2 100644 (file)
@@ -24,7 +24,7 @@
 #include <asm/mach-ath79/pci.h>
 
 #define AR71XX_PCI_MEM_BASE    0x10000000
-#define AR71XX_PCI_MEM_SIZE    0x08000000
+#define AR71XX_PCI_MEM_SIZE    0x07000000
 
 #define AR71XX_PCI_WIN0_OFFS           0x10000000
 #define AR71XX_PCI_WIN1_OFFS           0x11000000
index 86d77a6..c11c75b 100644 (file)
@@ -21,7 +21,7 @@
 #define AR724X_PCI_CTRL_SIZE   0x100
 
 #define AR724X_PCI_MEM_BASE    0x10000000
-#define AR724X_PCI_MEM_SIZE    0x08000000
+#define AR724X_PCI_MEM_SIZE    0x04000000
 
 #define AR724X_PCI_REG_RESET           0x18
 #define AR724X_PCI_REG_INT_STATUS      0x4c
index c1be439..a18abfc 100644 (file)
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
        mn10300_dcache_flush_inv();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif
index 467bbd5..106b395 100644 (file)
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
 /* At the moment, we panic on error for IOMMU resource exaustion */
 #define dma_mapping_error(dev, x)      0
 
+/* This API cannot be supported on PA-RISC */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif
index 5658508..7443481 100644 (file)
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        sldi    r29,r5,SID_SHIFT - VPN_SHIFT
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
        or      r29,r28,r29
-
-       /* Calculate hash value for primary slot and store it in r28 */
-       rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
-       rldicl  r0,r3,64-12,48          /* (ea >> 12) & 0xffff */
-       xor     r28,r5,r0
+       /*
+        * Calculate hash value for primary slot and store it in r28
+        * r3 = va, r5 = vsid
+        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+        */
+       rldicl  r0,r3,64-12,48
+       xor     r28,r5,r0               /* hash */
        b       4f
 
 3:     /* Calc vpn and put it in r29 */
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        /*
         * calculate hash value for primary slot and
         * store it in r28 for 1T segment
+        * r3 = va, r5 = vsid
         */
-       rldic   r28,r5,25,25            /* (vsid << 25) & 0x7fffffffff */
-       clrldi  r5,r5,40                /* vsid & 0xffffff */
-       rldicl  r0,r3,64-12,36          /* (ea >> 12) & 0xfffffff */
-       xor     r28,r28,r5
+       sldi    r28,r5,25               /* vsid << 25 */
+       /* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */
+       rldicl  r0,r3,64-12,36
+       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
        xor     r28,r28,r0              /* hash */
 
        /* Convert linux PTE bits into HW equivalents */
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
         */
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
        or      r29,r28,r29
-
-       /* Calculate hash value for primary slot and store it in r28 */
-       rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
-       rldicl  r0,r3,64-12,48          /* (ea >> 12) & 0xffff */
-       xor     r28,r5,r0
+       /*
+        * Calculate hash value for primary slot and store it in r28
+        * r3 = va, r5 = vsid
+        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+        */
+       rldicl  r0,r3,64-12,48
+       xor     r28,r5,r0               /* hash */
        b       4f
 
 3:     /* Calc vpn and put it in r29 */
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        /*
         * Calculate hash value for primary slot and
         * store it in r28  for 1T segment
+        * r3 = va, r5 = vsid
         */
-       rldic   r28,r5,25,25            /* (vsid << 25) & 0x7fffffffff */
-       clrldi  r5,r5,40                /* vsid & 0xffffff */
-       rldicl  r0,r3,64-12,36          /* (ea >> 12) & 0xfffffff */
-       xor     r28,r28,r5
+       sldi    r28,r5,25               /* vsid << 25 */
+       /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+       rldicl  r0,r3,64-12,36
+       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
        xor     r28,r28,r0              /* hash */
 
        /* Convert linux PTE bits into HW equivalents */
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
        or      r29,r28,r29
 
-       /* Calculate hash value for primary slot and store it in r28 */
-       rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
-       rldicl  r0,r3,64-16,52          /* (ea >> 16) & 0xfff */
-       xor     r28,r5,r0
+       /* Calculate hash value for primary slot and store it in r28
+        * r3 = va, r5 = vsid
+        * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
+        */
+       rldicl  r0,r3,64-16,52
+       xor     r28,r5,r0               /* hash */
        b       4f
 
 3:     /* Calc vpn and put it in r29 */
        sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
        or      r29,r28,r29
-
        /*
         * calculate hash value for primary slot and
         * store it in r28 for 1T segment
+        * r3 = va, r5 = vsid
         */
-       rldic   r28,r5,25,25            /* (vsid << 25) & 0x7fffffffff */
-       clrldi  r5,r5,40                /* vsid & 0xffffff */
-       rldicl  r0,r3,64-16,40          /* (ea >> 16) & 0xffffff */
-       xor     r28,r28,r5
+       sldi    r28,r5,25               /* vsid << 25 */
+       /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
+       rldicl  r0,r3,64-16,40
+       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
        xor     r28,r28,r0              /* hash */
 
        /* Convert linux PTE bits into HW equivalents */
index c1d7930..098adbb 100644 (file)
@@ -1365,6 +1365,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma,
        __pmd_idte(address, pmdp);
 }
 
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+                                     unsigned long address, pmd_t *pmdp)
+{
+       pmd_t pmd = *pmdp;
+
+       if (pmd_write(pmd)) {
+               __pmd_idte(address, pmdp);
+               set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
+       }
+}
+
 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
        pmd_t __pmd;
index a5f4f5a..0aa98db 100644 (file)
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
        nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
        do_div(nsecs, 125);
        S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+       /* Program the maximum value if we have an overflow (== year 2042) */
+       if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+               S390_lowcore.clock_comparator = -1ULL;
        set_clock_comparator(S390_lowcore.clock_comparator);
        return 0;
 }
index 875d008..1bb7ad4 100644 (file)
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Tilera-specific configuration"
 
 config NR_CPUS
index 2a9b293..3167291 100644 (file)
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
 {
        int x;
        BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                writel(*(u32 *)(src + x), dst + x);
 }
 
+#endif
+
 /*
  * The Tile architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
index b4e96fe..241c0bb 100644 (file)
 #include <arch/interrupts.h>
 #include <arch/chip.h>
 
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
 #define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+       (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
 
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+       (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
 #endif
 
 #ifndef __ASSEMBLY__
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1)                                        \
        {                                                       \
-        movei  tmp0, -1;                                       \
+        movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO;             \
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)        \
        };                                                      \
        {                                                       \
index 96b5710..2efe3f6 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 /* Note: must handle breaking interrupts into high and low words manually. */
 #define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
 #ifndef __ASSEMBLER__
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
 
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define NONQUEUED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define CRITICAL_MASKED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define MASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define UNMASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define SYNC_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define NON_SYNC_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
index 5bb58b2..13c9f91 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 #ifdef __ASSEMBLER__
 /* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
 #else
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
 
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define NONQUEUED_INTERRUPTS ( \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
     0)
 #define CRITICAL_MASKED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
     0)
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define MASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
     0)
 #define UNMASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define SYNC_INTERRUPTS ( \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
     0)
 #define NON_SYNC_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
index 54bc9a6..4ea0809 100644 (file)
@@ -1035,7 +1035,9 @@ handle_syscall:
        /* Ensure that the syscall number is within the legal range. */
        {
         moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
         blbs   r30, .Lcompat_syscall
+#endif
        }
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }
 
+#ifdef CONFIG_COMPAT
 .Lcompat_syscall:
        /*
         * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
        { move r15, r4; addxi r4, r4, 0 }
        { move r16, r5; addxi r5, r5, 0 }
        j .Lload_syscall_pointer
+#endif
 
 .Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
index 0e5661e..caf93ae 100644 (file)
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
 int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
 {
-       struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+       struct pt_regs *childregs = task_pt_regs(p);
        unsigned long ksp;
        unsigned long *callee_regs;
 
index baa3d90..d1b5c91 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/reboot.h>
 #include <linux/smp.h>
 #include <linux/pm.h>
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
 
 /* No interesting distinction to be made here. */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
index 6a649a4..d1e15f7 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
 #include <linux/start_kernel.h>
+#include <linux/screen_info.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 /* Chip information */
 char chip_model[64] __write_once;
 
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
index b2f44c2..ed258b8 100644 (file)
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
                       p->pc, p->sp, p->ex1);
                p = NULL;
        }
-       if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+       if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
 }
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
        save_stack_trace_tsk(NULL, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 #endif
 
index db4fb89..8f8ad81 100644 (file)
@@ -12,6 +12,7 @@
  *   more details.
  */
 
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
        __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
 #endif
 }
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
index fdc4036..75947ed 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/export.h>
 
 /*
  * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
        } while (*bp != '\0' && *bp != '\n');
        return 0;
 }
+EXPORT_SYMBOL(bitmap_parselist_crop);
index dd5f0a3..4385cb6 100644 (file)
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_sysconf);
 EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
 
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
index 5f7868d..1ae9119 100644 (file)
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
                __set_pte(ptep, pte_set_home(pteval, home));
        }
 }
+EXPORT_SYMBOL(homecache_change_page_home);
 
 struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
index 79795af..225543b 100644 (file)
@@ -2138,6 +2138,7 @@ config OLPC_XO1_RTC
 config OLPC_XO1_SCI
        bool "OLPC XO-1 SCI extras"
        depends on OLPC && OLPC_XO1_PM
+       depends on INPUT=y
        select POWER_SUPPLY
        select GPIO_CS5535
        select MFD_CORE
index ccce0ed..379814b 100644 (file)
@@ -71,7 +71,7 @@ GCOV_PROFILE := n
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@
 
 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
        $(call if_changed,image)
@@ -92,7 +92,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
        $(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
index 18e329c..f8fa411 100644 (file)
@@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
        int i;
        struct setup_data *data;
 
-       data = (struct setup_data *)params->hdr.setup_data;
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
 
        while (data && data->next)
-               data = (struct setup_data *)data->next;
+               data = (struct setup_data *)(unsigned long)data->next;
 
        status = efi_call_phys5(sys_table->boottime->locate_handle,
                                EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -295,16 +295,18 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                if (!pci)
                        continue;
 
+#ifdef CONFIG_X86_64
                status = efi_call_phys4(pci->attributes, pci,
                                        EfiPciIoAttributeOperationGet, 0,
                                        &attributes);
-
+#else
+               status = efi_call_phys5(pci->attributes, pci,
+                                       EfiPciIoAttributeOperationGet, 0, 0,
+                                       &attributes);
+#endif
                if (status != EFI_SUCCESS)
                        continue;
 
-               if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
-                       continue;
-
                if (!pci->romimage || !pci->romsize)
                        continue;
 
@@ -345,9 +347,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                memcpy(rom->romdata, pci->romimage, pci->romsize);
 
                if (data)
-                       data->next = (uint64_t)rom;
+                       data->next = (unsigned long)rom;
                else
-                       params->hdr.setup_data = (uint64_t)rom;
+                       params->hdr.setup_data = (unsigned long)rom;
 
                data = (struct setup_data *)rom;
 
@@ -432,10 +434,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
                         * Once we've found a GOP supporting ConOut,
                         * don't bother looking any further.
                         */
+                       first_gop = gop;
                        if (conout_found)
                                break;
-
-                       first_gop = gop;
                }
        }
 
index aa4aaf1..1e3184f 100644 (file)
@@ -35,11 +35,11 @@ ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
        jmp     preferred_addr
 
-       .balign 0x10
        /*
         * We don't need the return address, so set up the stack so
-        * efi_main() can find its arugments.
+        * efi_main() can find its arguments.
         */
+ENTRY(efi_pe_entry)
        add     $0x4, %esp
 
        call    make_boot_params
@@ -50,8 +50,10 @@ ENTRY(startup_32)
        pushl   %eax
        pushl   %esi
        pushl   %ecx
+       sub     $0x4, %esp
 
-       .org 0x30,0x90
+ENTRY(efi_stub_entry)
+       add     $0x4, %esp
        call    efi_main
        cmpl    $0, %eax
        movl    %eax, %esi
index 2c4b171..f5d1aaa 100644 (file)
@@ -201,12 +201,12 @@ ENTRY(startup_64)
         */
 #ifdef CONFIG_EFI_STUB
        /*
-        * The entry point for the PE/COFF executable is 0x210, so only
-        * legacy boot loaders will execute this jmp.
+        * The entry point for the PE/COFF executable is efi_pe_entry, so
+        * only legacy boot loaders will execute this jmp.
         */
        jmp     preferred_addr
 
-       .org 0x210
+ENTRY(efi_pe_entry)
        mov     %rcx, %rdi
        mov     %rdx, %rsi
        pushq   %rdi
@@ -218,7 +218,7 @@ ENTRY(startup_64)
        popq    %rsi
        popq    %rdi
 
-       .org 0x230,0x90
+ENTRY(efi_stub_entry)
        call    efi_main
        movq    %rax,%rsi
        cmpq    $0,%rax
index 8c132a6..944ce59 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/e820.h>
 #include <asm/page_types.h>
 #include <asm/setup.h>
+#include <asm/bootparam.h>
 #include "boot.h"
 #include "voffset.h"
 #include "zoffset.h"
@@ -255,6 +256,9 @@ section_table:
        # header, from the old boot sector.
 
        .section ".header", "a"
+       .globl  sentinel
+sentinel:      .byte 0xff, 0xff        /* Used to detect broken loaders */
+
        .globl  hdr
 hdr:
 setup_sects:   .byte 0                 /* Filled in by build.c */
@@ -279,7 +283,7 @@ _start:
        # Part 2 of the header, from the old setup.S
 
                .ascii  "HdrS"          # header signature
-               .word   0x020b          # header version number (>= 0x0105)
+               .word   0x020c          # header version number (>= 0x0105)
                                        # or else old loadlin-1.5 will fail)
                .globl realmode_swtch
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
@@ -297,13 +301,7 @@ type_of_loader:    .byte   0               # 0 means ancient bootloader, newer
 
 # flags, unused bits must be zero (RFU) bit within loadflags
 loadflags:
-LOADED_HIGH    = 1                     # If set, the kernel is loaded high
-CAN_USE_HEAP   = 0x80                  # If set, the loader also has set
-                                       # heap_end_ptr to tell how much
-                                       # space behind setup.S can be used for
-                                       # heap purposes.
-                                       # Only the loader knows what is free
-               .byte   LOADED_HIGH
+               .byte   LOADED_HIGH     # The kernel is to be loaded high
 
 setup_move_size: .word  0x8000         # size to move, when setup is not
                                        # loaded at 0x90000. We will move setup
@@ -369,7 +367,23 @@ relocatable_kernel:    .byte 1
 relocatable_kernel:    .byte 0
 #endif
 min_alignment:         .byte MIN_KERNEL_ALIGN_LG2      # minimum alignment
-pad3:                  .word 0
+
+xloadflags:
+#ifdef CONFIG_X86_64
+# define XLF0 XLF_KERNEL_64                    /* 64-bit kernel */
+#else
+# define XLF0 0
+#endif
+#ifdef CONFIG_EFI_STUB
+# ifdef CONFIG_X86_64
+#  define XLF23 XLF_EFI_HANDOVER_64            /* 64-bit EFI handover ok */
+# else
+#  define XLF23 XLF_EFI_HANDOVER_32            /* 32-bit EFI handover ok */
+# endif
+#else
+# define XLF23 0
+#endif
+                       .word XLF0 | XLF23
 
 cmdline_size:   .long   COMMAND_LINE_SIZE-1     #length of the command line,
                                                 #added with boot protocol
@@ -397,8 +411,13 @@ pref_address:              .quad LOAD_PHYSICAL_ADDR        # preferred load addr
 #define INIT_SIZE VO_INIT_SIZE
 #endif
 init_size:             .long INIT_SIZE         # kernel initialization size
-handover_offset:       .long 0x30              # offset to the handover
+handover_offset:
+#ifdef CONFIG_EFI_STUB
+                       .long 0x30              # offset to the handover
                                                # protocol entry point
+#else
+                       .long 0
+#endif
 
 # End of setup header #####################################################
 
index 03c0683..96a6c75 100644 (file)
@@ -13,7 +13,7 @@ SECTIONS
        .bstext         : { *(.bstext) }
        .bsdata         : { *(.bsdata) }
 
-       . = 497;
+       . = 495;
        .header         : { *(.header) }
        .entrytext      : { *(.entrytext) }
        .inittext       : { *(.inittext) }
index 4b8e165..94c5446 100644 (file)
@@ -52,6 +52,10 @@ int is_big_kernel;
 
 #define PECOFF_RELOC_RESERVE 0x20
 
+unsigned long efi_stub_entry;
+unsigned long efi_pe_entry;
+unsigned long startup_64;
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -132,7 +136,7 @@ static void die(const char * str, ...)
 
 static void usage(void)
 {
-       die("Usage: build setup system [> image]");
+       die("Usage: build setup system [zoffset.h] [> image]");
 }
 
 #ifdef CONFIG_EFI_STUB
@@ -206,30 +210,54 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
         */
        put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
 
-#ifdef CONFIG_X86_32
        /*
-        * Address of entry point.
-        *
-        * The EFI stub entry point is +16 bytes from the start of
-        * the .text section.
+        * Address of entry point for PE/COFF executable
         */
-       put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
-#else
-       /*
-        * Address of entry point. startup_32 is at the beginning and
-        * the 64-bit entry point (startup_64) is always 512 bytes
-        * after. The EFI stub entry point is 16 bytes after that, as
-        * the first instruction allows legacy loaders to jump over
-        * the EFI stub initialisation
-        */
-       put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
-#endif /* CONFIG_X86_32 */
+       put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
 
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
 #endif /* CONFIG_EFI_STUB */
 
+
+/*
+ * Parse zoffset.h and find the entry points. We could just #include zoffset.h
+ * but that would mean tools/build would have to be rebuilt every time. It's
+ * not as if parsing it is hard...
+ */
+#define PARSE_ZOFS(p, sym) do { \
+       if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym)))       \
+               sym = strtoul(p + 11 + sizeof(#sym), NULL, 16);         \
+} while (0)
+
+static void parse_zoffset(char *fname)
+{
+       FILE *file;
+       char *p;
+       int c;
+
+       file = fopen(fname, "r");
+       if (!file)
+               die("Unable to open `%s': %m", fname);
+       c = fread(buf, 1, sizeof(buf) - 1, file);
+       if (ferror(file))
+               die("read-error on `zoffset.h'");
+       buf[c] = 0;
+
+       p = (char *)buf;
+
+       while (p && *p) {
+               PARSE_ZOFS(p, efi_stub_entry);
+               PARSE_ZOFS(p, efi_pe_entry);
+               PARSE_ZOFS(p, startup_64);
+
+               p = strchr(p, '\n');
+               while (p && (*p == '\r' || *p == '\n'))
+                       p++;
+       }
+}
+
 int main(int argc, char ** argv)
 {
        unsigned int i, sz, setup_sectors;
@@ -241,7 +269,19 @@ int main(int argc, char ** argv)
        void *kernel;
        u32 crc = 0xffffffffUL;
 
-       if (argc != 3)
+       /* Defaults for old kernel */
+#ifdef CONFIG_X86_32
+       efi_pe_entry = 0x10;
+       efi_stub_entry = 0x30;
+#else
+       efi_pe_entry = 0x210;
+       efi_stub_entry = 0x230;
+       startup_64 = 0x200;
+#endif
+
+       if (argc == 4)
+               parse_zoffset(argv[3]);
+       else if (argc != 3)
                usage();
 
        /* Copy the setup code */
@@ -299,6 +339,11 @@ int main(int argc, char ** argv)
 
 #ifdef CONFIG_EFI_STUB
        update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+
+#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
+       efi_stub_entry -= 0x200;
+#endif
+       put_unaligned_le32(efi_stub_entry, &buf[0x264]);
 #endif
 
        crc = partial_crc32(buf, i, crc);
index 102ff7c..142c4ce 100644 (file)
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz ia32_ret_from_sys_call
        TRACE_IRQS_ON
-       sti
+       ENABLE_INTERRUPTS(CLBR_NONE)
        movl %eax,%esi          /* second arg, syscall return value */
        cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
        jbe 1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
        call __audit_syscall_exit
        movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-       cli
+       DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz \exit
index 6e8fdf5..28677c5 100644 (file)
@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
+extern unsigned long x86_efi_facility;
 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
index ecdfee6..f4076af 100644 (file)
@@ -3,6 +3,90 @@
 
 #include <uapi/asm/mce.h>
 
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK       0xff         /* Number of Banks */
+#define MCG_CTL_P              (1ULL<<8)    /* MCG_CTL register available */
+#define MCG_EXT_P              (1ULL<<9)    /* Extended registers available */
+#define MCG_CMCI_P             (1ULL<<10)   /* CMCI supported */
+#define MCG_EXT_CNT_MASK       0xff0000     /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT      16
+#define MCG_EXT_CNT(c)         (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P              (1ULL<<24)   /* MCA recovery/new status bits */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */
+#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */
+#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
+#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
+#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
+#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
+#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
+#define MCI_STATUS_S    (1ULL<<56)  /* Signaled machine check */
+#define MCI_STATUS_AR   (1ULL<<55)  /* Action required */
+#define MCACOD           0xffff     /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB   0x00C0  /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK        0xfff0
+#define MCACOD_L3WB    0x017A  /* L3 Explicit Writeback */
+#define MCACOD_DATA    0x0134  /* Data Load */
+#define MCACOD_INSTR   0x0150  /* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m)   ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m)  (((m) >> 6) & 7)
+#define  MCI_MISC_ADDR_SEGOFF  0       /* segment offset */
+#define  MCI_MISC_ADDR_LINEAR  1       /* linear address */
+#define  MCI_MISC_ADDR_PHYS    2       /* physical address */
+#define  MCI_MISC_ADDR_MEM     3       /* memory address */
+#define  MCI_MISC_ADDR_GENERIC 7       /* generic */
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN               (1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK   0x7fffULL
+
+#define MCJ_CTX_MASK           3
+#define MCJ_CTX(flags)         ((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM         0    /* inject context: random */
+#define MCJ_CTX_PROCESS                0x1  /* inject context: process */
+#define MCJ_CTX_IRQ            0x2  /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST      0x4  /* do NMI broadcasting */
+#define MCJ_EXCEPTION          0x8  /* raise as exception */
+#define MCJ_IRQ_BRAODCAST      0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0         /* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK      128
+#define MCE_THERMAL_BANK       (MCE_EXTENDED_BANK + 0)
+#define K8_MCE_THRESHOLD_BASE   (MCE_EXTENDED_BANK + 1)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE      "MACHINECHECK"
+
+/*
+ * This structure contains all data related to the MCE log.  Also
+ * carries a signature to make it easier to find from external
+ * debugging tools.  Each entry is only valid when its finished flag
+ * is set.
+ */
+struct mce_log {
+       char signature[12]; /* "MACHINECHECK" */
+       unsigned len;       /* = MCE_LOG_LEN */
+       unsigned next;
+       unsigned flags;
+       unsigned recordlen;     /* length of struct mce */
+       struct mce entry[MCE_LOG_LEN];
+};
 
 struct mca_config {
        bool dont_log_ce;
index 5199db2..1c1a955 100644 (file)
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pud_pfn(pud_t pud)
+{
+       return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte)  pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
index b47c2a8..062921e 100644 (file)
@@ -16,7 +16,7 @@ extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
-                                                unsigned end,
+                                                unsigned long end,
                                                 unsigned int cpu);
 
 #else  /* X86_UV */
index 92862cd..c15ddaf 100644 (file)
@@ -1,6 +1,31 @@
 #ifndef _ASM_X86_BOOTPARAM_H
 #define _ASM_X86_BOOTPARAM_H
 
+/* setup_data types */
+#define SETUP_NONE                     0
+#define SETUP_E820_EXT                 1
+#define SETUP_DTB                      2
+#define SETUP_PCI                      3
+
+/* ram_size flags */
+#define RAMDISK_IMAGE_START_MASK       0x07FF
+#define RAMDISK_PROMPT_FLAG            0x8000
+#define RAMDISK_LOAD_FLAG              0x4000
+
+/* loadflags */
+#define LOADED_HIGH    (1<<0)
+#define QUIET_FLAG     (1<<5)
+#define KEEP_SEGMENTS  (1<<6)
+#define CAN_USE_HEAP   (1<<7)
+
+/* xloadflags */
+#define XLF_KERNEL_64                  (1<<0)
+#define XLF_CAN_BE_LOADED_ABOVE_4G     (1<<1)
+#define XLF_EFI_HANDOVER_32            (1<<2)
+#define XLF_EFI_HANDOVER_64            (1<<3)
+
+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/screen_info.h>
 #include <linux/apm_bios.h>
@@ -9,12 +34,6 @@
 #include <asm/ist.h>
 #include <video/edid.h>
 
-/* setup data types */
-#define SETUP_NONE                     0
-#define SETUP_E820_EXT                 1
-#define SETUP_DTB                      2
-#define SETUP_PCI                      3
-
 /* extensible setup data list node */
 struct setup_data {
        __u64 next;
@@ -28,9 +47,6 @@ struct setup_header {
        __u16   root_flags;
        __u32   syssize;
        __u16   ram_size;
-#define RAMDISK_IMAGE_START_MASK       0x07FF
-#define RAMDISK_PROMPT_FLAG            0x8000
-#define RAMDISK_LOAD_FLAG              0x4000
        __u16   vid_mode;
        __u16   root_dev;
        __u16   boot_flag;
@@ -42,10 +58,6 @@ struct setup_header {
        __u16   kernel_version;
        __u8    type_of_loader;
        __u8    loadflags;
-#define LOADED_HIGH    (1<<0)
-#define QUIET_FLAG     (1<<5)
-#define KEEP_SEGMENTS  (1<<6)
-#define CAN_USE_HEAP   (1<<7)
        __u16   setup_move_size;
        __u32   code32_start;
        __u32   ramdisk_image;
@@ -58,7 +70,8 @@ struct setup_header {
        __u32   initrd_addr_max;
        __u32   kernel_alignment;
        __u8    relocatable_kernel;
-       __u8    _pad2[3];
+       __u8    min_alignment;
+       __u16   xloadflags;
        __u32   cmdline_size;
        __u32   hardware_subarch;
        __u64   hardware_subarch_data;
@@ -106,7 +119,10 @@ struct boot_params {
        __u8  hd1_info[16];     /* obsolete! */         /* 0x090 */
        struct sys_desc_table sys_desc_table;           /* 0x0a0 */
        struct olpc_ofw_header olpc_ofw_header;         /* 0x0b0 */
-       __u8  _pad4[128];                               /* 0x0c0 */
+       __u32 ext_ramdisk_image;                        /* 0x0c0 */
+       __u32 ext_ramdisk_size;                         /* 0x0c4 */
+       __u32 ext_cmd_line_ptr;                         /* 0x0c8 */
+       __u8  _pad4[116];                               /* 0x0cc */
        struct edid_info edid_info;                     /* 0x140 */
        struct efi_info efi_info;                       /* 0x1c0 */
        __u32 alt_mem_k;                                /* 0x1e0 */
@@ -115,7 +131,20 @@ struct boot_params {
        __u8  eddbuf_entries;                           /* 0x1e9 */
        __u8  edd_mbr_sig_buf_entries;                  /* 0x1ea */
        __u8  kbd_status;                               /* 0x1eb */
-       __u8  _pad6[5];                                 /* 0x1ec */
+       __u8  _pad5[3];                                 /* 0x1ec */
+       /*
+        * The sentinel is set to a nonzero value (0xff) in header.S.
+        *
+        * A bootloader is supposed to only take setup_header and put
+        * it into a clean boot_params buffer. If it turns out that
+        * it is clumsy or too generous with the buffer, it most
+        * probably will pick up the sentinel variable too. The fact
+        * that this variable then is still 0xff will let kernel
+        * know that some variables in boot_params are invalid and
+        * kernel should zero out certain portions of boot_params.
+        */
+       __u8  sentinel;                                 /* 0x1ef */
+       __u8  _pad6[1];                                 /* 0x1f0 */
        struct setup_header hdr;    /* setup header */  /* 0x1f1 */
        __u8  _pad7[0x290-0x1f1-sizeof(struct setup_header)];
        __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX];      /* 0x290 */
@@ -134,6 +163,6 @@ enum {
        X86_NR_SUBARCHS,
 };
 
-
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_BOOTPARAM_H */
index 58c8298..a0eab85 100644 (file)
@@ -4,66 +4,6 @@
 #include <linux/types.h>
 #include <asm/ioctls.h>
 
-/*
- * Machine Check support for x86
- */
-
-/* MCG_CAP register defines */
-#define MCG_BANKCNT_MASK       0xff         /* Number of Banks */
-#define MCG_CTL_P              (1ULL<<8)    /* MCG_CTL register available */
-#define MCG_EXT_P              (1ULL<<9)    /* Extended registers available */
-#define MCG_CMCI_P             (1ULL<<10)   /* CMCI supported */
-#define MCG_EXT_CNT_MASK       0xff0000     /* Number of Extended registers */
-#define MCG_EXT_CNT_SHIFT      16
-#define MCG_EXT_CNT(c)         (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P              (1ULL<<24)   /* MCA recovery/new status bits */
-
-/* MCG_STATUS register defines */
-#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */
-#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */
-#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */
-
-/* MCi_STATUS register defines */
-#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
-#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
-#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
-#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
-#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
-#define MCI_STATUS_S    (1ULL<<56)  /* Signaled machine check */
-#define MCI_STATUS_AR   (1ULL<<55)  /* Action required */
-#define MCACOD           0xffff     /* MCA Error Code */
-
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB   0x00C0  /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK        0xfff0
-#define MCACOD_L3WB    0x017A  /* L3 Explicit Writeback */
-#define MCACOD_DATA    0x0134  /* Data Load */
-#define MCACOD_INSTR   0x0150  /* Instruction Fetch */
-
-/* MCi_MISC register defines */
-#define MCI_MISC_ADDR_LSB(m)   ((m) & 0x3f)
-#define MCI_MISC_ADDR_MODE(m)  (((m) >> 6) & 7)
-#define  MCI_MISC_ADDR_SEGOFF  0       /* segment offset */
-#define  MCI_MISC_ADDR_LINEAR  1       /* linear address */
-#define  MCI_MISC_ADDR_PHYS    2       /* physical address */
-#define  MCI_MISC_ADDR_MEM     3       /* memory address */
-#define  MCI_MISC_ADDR_GENERIC 7       /* generic */
-
-/* CTL2 register defines */
-#define MCI_CTL2_CMCI_EN               (1ULL << 30)
-#define MCI_CTL2_CMCI_THRESHOLD_MASK   0x7fffULL
-
-#define MCJ_CTX_MASK           3
-#define MCJ_CTX(flags)         ((flags) & MCJ_CTX_MASK)
-#define MCJ_CTX_RANDOM         0    /* inject context: random */
-#define MCJ_CTX_PROCESS                0x1  /* inject context: process */
-#define MCJ_CTX_IRQ            0x2  /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST      0x4  /* do NMI broadcasting */
-#define MCJ_EXCEPTION          0x8  /* raise as exception */
-#define MCJ_IRQ_BRAODCAST      0x10 /* do IRQ broadcasting */
-
 /* Fields are zero when not available */
 struct mce {
        __u64 status;
@@ -87,35 +27,8 @@ struct mce {
        __u64 mcgcap;   /* MCGCAP MSR: machine check capabilities of CPU */
 };
 
-/*
- * This structure contains all data related to the MCE log.  Also
- * carries a signature to make it easier to find from external
- * debugging tools.  Each entry is only valid when its finished flag
- * is set.
- */
-
-#define MCE_LOG_LEN 32
-
-struct mce_log {
-       char signature[12]; /* "MACHINECHECK" */
-       unsigned len;       /* = MCE_LOG_LEN */
-       unsigned next;
-       unsigned flags;
-       unsigned recordlen;     /* length of struct mce */
-       struct mce entry[MCE_LOG_LEN];
-};
-
-#define MCE_OVERFLOW 0         /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE      "MACHINECHECK"
-
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
 #define MCE_GET_LOG_LEN      _IOR('M', 2, int)
 #define MCE_GETCLEAR_FLAGS   _IOR('M', 3, int)
 
-/* Software defined banks */
-#define MCE_EXTENDED_BANK      128
-#define MCE_THERMAL_BANK       MCE_EXTENDED_BANK + 0
-#define K8_MCE_THRESHOLD_BASE      (MCE_EXTENDED_BANK + 1)
-
 #endif /* _UAPI_ASM_X86_MCE_H */
index e03a1e1..562a76d 100644 (file)
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
 }
 early_param("x2apic_phys", set_x2apic_phys_mode);
 
-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static bool x2apic_fadt_phys(void)
 {
-       if (x2apic_phys)
-               return x2apic_enabled();
-       else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
-               (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
-               x2apic_enabled()) {
+       if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+               (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                printk(KERN_DEBUG "System requires x2apic physical mode\n");
-               return 1;
+               return true;
        }
-       else
-               return 0;
+       return false;
+}
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
 }
 
 static void
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
 
 static int x2apic_phys_probe(void)
 {
-       if (x2apic_mode && x2apic_phys)
+       if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
                return 1;
 
        return apic == &apic_x2apic_phys;
index fe9edec..84c1309 100644 (file)
@@ -298,8 +298,7 @@ struct _cache_attr {
                         unsigned int);
 };
 
-#ifdef CONFIG_AMD_NB
-
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
 /*
  * L3 cache descriptors
  */
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 static struct _cache_attr subcaches =
        __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
 
-#else  /* CONFIG_AMD_NB */
+#else
 #define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
+#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
index 93b9e11..4914e94 100644 (file)
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
                break;
 
        case 28: /* Atom */
-       case 54: /* Cedariew */
+       case 38: /* Lincroft */
+       case 39: /* Penwell */
+       case 53: /* Cloverview */
+       case 54: /* Cedarview */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
                pr_cont("SandyBridge events, ");
                break;
        case 58: /* IvyBridge */
+       case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
index f2af39f..4820c23 100644 (file)
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
 
 };
 
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
index 07a7a04..cb3c591 100644 (file)
@@ -1781,6 +1781,7 @@ first_nmi:
         * Leave room for the "copied" frame
         */
        subq $(5*8), %rsp
+       CFI_ADJUST_CFA_OFFSET 5*8
 
        /* Copy the stack frame to the Saved frame */
        .rept 5
@@ -1863,10 +1864,8 @@ end_repeat_nmi:
 nmi_swapgs:
        SWAPGS_UNSAFE_STACK
 nmi_restore:
-       RESTORE_ALL 8
-
-       /* Pop the extra iret frame */
-       addq $(5*8), %rsp
+       /* Pop the extra iret frame at once */
+       RESTORE_ALL 6*8
 
        /* Clear the NMI executing stack variable */
        movq $0, 5*8(%rsp)
index 8e7f655..c8932c7 100644 (file)
@@ -300,6 +300,12 @@ ENTRY(startup_32_smp)
        leal -__PAGE_OFFSET(%ecx),%esp
 
 default_entry:
+#define CR0_STATE      (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
+                        X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
+                        X86_CR0_PG)
+       movl $(CR0_STATE & ~X86_CR0_PG),%eax
+       movl %eax,%cr0
+
 /*
  *     New page tables may be in 4Mbyte page mode and may
  *     be using the global pages. 
@@ -364,8 +370,7 @@ default_entry:
  */
        movl $pa(initial_page_table), %eax
        movl %eax,%cr3          /* set the page table pointer.. */
-       movl %cr0,%eax
-       orl  $X86_CR0_PG,%eax
+       movl $CR0_STATE,%eax
        movl %eax,%cr0          /* ..and set paging (PG) bit */
        ljmp $__BOOT_CS,$1f     /* Clear prefetch and normalize %eip */
 1:
index a7c5661..4929502 100644 (file)
@@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file)
        unsigned int cpu;
        struct cpuinfo_x86 *c;
 
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
+
        cpu = iminor(file->f_path.dentry->d_inode);
        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;  /* No such CPU */
index 0f5dec5..872079a 100644 (file)
@@ -56,7 +56,7 @@ struct device x86_dma_fallback_dev = {
 EXPORT_SYMBOL(x86_dma_fallback_dev);
 
 /* Number of entries preallocated for DMA-API debugging */
-#define PREALLOC_DMA_DEBUG_ENTRIES       32768
+#define PREALLOC_DMA_DEBUG_ENTRIES       65536
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
index 4e8ba39..76fa1e9 100644 (file)
@@ -584,7 +584,7 @@ static void native_machine_emergency_restart(void)
                        break;
 
                case BOOT_EFI:
-                       if (efi_enabled)
+                       if (efi_enabled(EFI_RUNTIME_SERVICES))
                                efi.reset_system(reboot_mode ?
                                                 EFI_RESET_WARM :
                                                 EFI_RESET_COLD,
index 00f6c14..8b24289 100644 (file)
@@ -807,15 +807,15 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL32", 4)) {
-               efi_enabled = 1;
-               efi_64bit = false;
+               set_bit(EFI_BOOT, &x86_efi_facility);
        } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4)) {
-               efi_enabled = 1;
-               efi_64bit = true;
+               set_bit(EFI_BOOT, &x86_efi_facility);
+               set_bit(EFI_64BIT, &x86_efi_facility);
        }
-       if (efi_enabled && efi_memblock_x86_reserve_range())
-               efi_enabled = 0;
+
+       if (efi_enabled(EFI_BOOT))
+               efi_memblock_x86_reserve_range();
 #endif
 
        x86_init.oem.arch_setup();
@@ -888,7 +888,7 @@ void __init setup_arch(char **cmdline_p)
 
        finish_e820_parsing();
 
-       if (efi_enabled)
+       if (efi_enabled(EFI_BOOT))
                efi_init();
 
        dmi_scan_machine();
@@ -971,7 +971,7 @@ void __init setup_arch(char **cmdline_p)
         * The EFI specification says that boot service code won't be called
         * after ExitBootServices(). This is, in fact, a lie.
         */
-       if (efi_enabled)
+       if (efi_enabled(EFI_MEMMAP))
                efi_reserve_boot_services();
 
        /* preallocate 4k for mptable mpc */
@@ -1114,7 +1114,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
-       if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+       if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
 #elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
@@ -1131,14 +1131,14 @@ void __init setup_arch(char **cmdline_p)
        register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
-       /* Once setup is done above, disable efi_enabled on mismatched
-        * firmware/kernel archtectures since there is no support for
-        * runtime services.
+       /* Once setup is done above, unmap the EFI memory map on
+        * mismatched firmware/kernel architectures since there is no
+        * support for runtime services.
         */
-       if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+       if (efi_enabled(EFI_BOOT) &&
+           IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
                pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
                efi_unmap_memmap();
-               efi_enabled = 0;
        }
 #endif
 }
index 027088f..fb674fd 100644 (file)
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                                return;
                }
 #endif
+               /* Kernel addresses are always protection faults: */
+               if (address >= TASK_SIZE)
+                       error_code |= PF_PROT;
 
-               if (unlikely(show_unhandled_signals))
+               if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);
 
-               /* Kernel addresses are always protection faults: */
                tsk->thread.cr2         = address;
-               tsk->thread.error_code  = error_code | (address >= TASK_SIZE);
+               tsk->thread.error_code  = error_code;
                tsk->thread.trap_nr     = X86_TRAP_PF;
 
                force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
index 2ead3c8..75c9a6a 100644 (file)
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
        if (pud_none(*pud))
                return 0;
 
+       if (pud_large(*pud))
+               return pfn_valid(pud_pfn(*pud));
+
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
index ad44391..77cf009 100644 (file)
@@ -51,9 +51,6 @@
 
 #define EFI_DEBUG      1
 
-int efi_enabled;
-EXPORT_SYMBOL(efi_enabled);
-
 struct efi __read_mostly efi = {
        .mps        = EFI_INVALID_TABLE_ADDR,
        .acpi       = EFI_INVALID_TABLE_ADDR,
@@ -69,19 +66,28 @@ EXPORT_SYMBOL(efi);
 
 struct efi_memory_map memmap;
 
-bool efi_64bit;
-
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
 static inline bool efi_is_native(void)
 {
-       return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+       return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+unsigned long x86_efi_facility;
+
+/*
+ * Returns 1 if 'facility' is enabled, 0 otherwise.
+ */
+int efi_enabled(int facility)
+{
+       return test_bit(facility, &x86_efi_facility) != 0;
 }
+EXPORT_SYMBOL(efi_enabled);
 
 static int __init setup_noefi(char *arg)
 {
-       efi_enabled = 0;
+       clear_bit(EFI_BOOT, &x86_efi_facility);
        return 0;
 }
 early_param("noefi", setup_noefi);
@@ -426,6 +432,7 @@ void __init efi_reserve_boot_services(void)
 
 void __init efi_unmap_memmap(void)
 {
+       clear_bit(EFI_MEMMAP, &x86_efi_facility);
        if (memmap.map) {
                early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
                memmap.map = NULL;
@@ -460,7 +467,7 @@ void __init efi_free_boot_services(void)
 
 static int __init efi_systab_init(void *phys)
 {
-       if (efi_64bit) {
+       if (efi_enabled(EFI_64BIT)) {
                efi_system_table_64_t *systab64;
                u64 tmp = 0;
 
@@ -552,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
        void *config_tables, *tablep;
        int i, sz;
 
-       if (efi_64bit)
+       if (efi_enabled(EFI_64BIT))
                sz = sizeof(efi_config_table_64_t);
        else
                sz = sizeof(efi_config_table_32_t);
@@ -572,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
                efi_guid_t guid;
                unsigned long table;
 
-               if (efi_64bit) {
+               if (efi_enabled(EFI_64BIT)) {
                        u64 table64;
                        guid = ((efi_config_table_64_t *)tablep)->guid;
                        table64 = ((efi_config_table_64_t *)tablep)->table;
@@ -684,7 +691,6 @@ void __init efi_init(void)
        if (boot_params.efi_info.efi_systab_hi ||
            boot_params.efi_info.efi_memmap_hi) {
                pr_info("Table located above 4GB, disabling EFI.\n");
-               efi_enabled = 0;
                return;
        }
        efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
@@ -694,10 +700,10 @@ void __init efi_init(void)
                          ((__u64)boot_params.efi_info.efi_systab_hi<<32));
 #endif
 
-       if (efi_systab_init(efi_phys.systab)) {
-               efi_enabled = 0;
+       if (efi_systab_init(efi_phys.systab))
                return;
-       }
+
+       set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
        /*
         * Show what we know for posterity
@@ -715,10 +721,10 @@ void __init efi_init(void)
                efi.systab->hdr.revision >> 16,
                efi.systab->hdr.revision & 0xffff, vendor);
 
-       if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) {
-               efi_enabled = 0;
+       if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
                return;
-       }
+
+       set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
 
        /*
         * Note: We currently don't support runtime services on an EFI
@@ -727,15 +733,17 @@ void __init efi_init(void)
 
        if (!efi_is_native())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
-       else if (efi_runtime_init()) {
-               efi_enabled = 0;
-               return;
+       else {
+               if (efi_runtime_init())
+                       return;
+               set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
        }
 
-       if (efi_memmap_init()) {
-               efi_enabled = 0;
+       if (efi_memmap_init())
                return;
-       }
+
+       set_bit(EFI_MEMMAP, &x86_efi_facility);
+
 #ifdef CONFIG_X86_32
        if (efi_is_native()) {
                x86_platform.get_wallclock = efi_get_time;
@@ -941,7 +949,7 @@ void __init efi_enter_virtual_mode(void)
         *
         * Call EFI services through wrapper functions.
         */
-       efi.runtime_version = efi_systab.fw_revision;
+       efi.runtime_version = efi_systab.hdr.revision;
        efi.get_time = virt_efi_get_time;
        efi.set_time = virt_efi_set_time;
        efi.get_wakeup_time = virt_efi_get_wakeup_time;
@@ -969,6 +977,9 @@ u32 efi_mem_type(unsigned long phys_addr)
        efi_memory_desc_t *md;
        void *p;
 
+       if (!efi_enabled(EFI_MEMMAP))
+               return 0;
+
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
                if ((md->phys_addr <= phys_addr) &&
index 95fd505..2b20038 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 
-static pgd_t save_pgd __initdata;
+static pgd_t *save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
 static void __init early_code_mapping_set_exec(int executable)
@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
 void __init efi_call_phys_prelog(void)
 {
        unsigned long vaddress;
+       int pgd;
+       int n_pgds;
 
        early_code_mapping_set_exec(1);
        local_irq_save(efi_flags);
-       vaddress = (unsigned long)__va(0x0UL);
-       save_pgd = *pgd_offset_k(0x0UL);
-       set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
+
+       n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+       save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
+
+       for (pgd = 0; pgd < n_pgds; pgd++) {
+               save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
+               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+       }
        __flush_tlb_all();
 }
 
@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
        /*
         * After the lock is released, the original page table is restored.
         */
-       set_pgd(pgd_offset_k(0x0UL), save_pgd);
+       int pgd;
+       int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+       for (pgd = 0; pgd < n_pgds; pgd++)
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+       kfree(save_pgd);
        __flush_tlb_all();
        local_irq_restore(efi_flags);
        early_code_mapping_set_exec(0);
index b8b3a37..dbbdca5 100644 (file)
@@ -1034,7 +1034,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * globally purge translation cache of a virtual address or all TLB's
  * @cpumask: mask of all cpu's in which the address is to be removed
  * @mm: mm_struct containing virtual address range
- * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
+ * @start: start virtual address to be removed from TLB
+ * @end: end virtual address to be removed from TLB
  * @cpu: the current cpu
  *
  * This is the entry point for initiating any UV global TLB shootdown.
@@ -1056,7 +1057,7 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                struct mm_struct *mm, unsigned long start,
-                               unsigned end, unsigned int cpu)
+                               unsigned long end, unsigned int cpu)
 {
        int locals = 0;
        int remotes = 0;
@@ -1113,7 +1114,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
        record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-       bau_desc->payload.address = start;
+       if (!end || (end - start) <= PAGE_SIZE)
+               bau_desc->payload.address = start;
+       else
+               bau_desc->payload.address = TLB_FLUSH_ALL;
        bau_desc->payload.sending_cpu = cpu;
        /*
         * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
index cc2f8c1..872eb60 100644 (file)
@@ -55,7 +55,7 @@ static FILE           *input_file;    /* Input file name */
 static void usage(const char *err)
 {
        if (err)
-               fprintf(stderr, "Error: %s\n\n", err);
+               fprintf(stderr, "%s: Error: %s\n\n", prog, err);
        fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
        fprintf(stderr, "\t-y   64bit mode\n");
        fprintf(stderr, "\t-n   32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
                insns++;
        }
 
-       fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+       fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+               prog,
+               (errors) ? "Failure" : "Success",
+               insns,
+               (input_file) ? "given" : "random",
+               errors,
+               seed);
 
        return errors ? 1 : 0;
 }
index 5a1847d..79d67bd 100644 (file)
@@ -814,12 +814,14 @@ int main(int argc, char **argv)
        read_relocs(fp);
        if (show_absolute_syms) {
                print_absolute_symbols();
-               return 0;
+               goto out;
        }
        if (show_absolute_relocs) {
                print_absolute_relocs();
-               return 0;
+               goto out;
        }
        emit_relocs(as_text, use_real_mode);
+out:
+       fclose(fp);
        return 0;
 }
index 4acb5fe..172a02a 100644 (file)
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        consistent_sync(vaddr, size, direction);
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif /* _XTENSA_DMA_MAPPING_H */
index 9a289d7..3993ebf 100644 (file)
@@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_check_events(struct disk_events *ev,
+                             unsigned int *clearing_ptr);
 static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
@@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
        const struct block_device_operations *bdops = disk->fops;
        struct disk_events *ev = disk->ev;
        unsigned int pending;
+       unsigned int clearing = mask;
 
        if (!ev) {
                /* for drivers still using the old ->media_changed method */
@@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
                return 0;
        }
 
-       /* tell the workfn about the events being cleared */
+       disk_block_events(disk);
+
+       /*
+        * store the union of mask and ev->clearing on the stack so that the
+        * race with disk_flush_events does not cause ambiguity (ev->clearing
+        * can still be modified even if events are blocked).
+        */
        spin_lock_irq(&ev->lock);
-       ev->clearing |= mask;
+       clearing |= ev->clearing;
+       ev->clearing = 0;
        spin_unlock_irq(&ev->lock);
 
-       /* uncondtionally schedule event check and wait for it to finish */
-       disk_block_events(disk);
-       queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
-       flush_delayed_work(&ev->dwork);
-       __disk_unblock_events(disk, false);
+       disk_check_events(ev, &clearing);
+       /*
+        * if ev->clearing is not 0, disk_flush_events was called in the
+        * middle of this function, so we want to run the workfn without delay.
+        */
+       __disk_unblock_events(disk, ev->clearing ? true : false);
 
        /* then, fetch and clear pending events */
        spin_lock_irq(&ev->lock);
-       WARN_ON_ONCE(ev->clearing & mask);      /* cleared by workfn */
        pending = ev->pending & mask;
        ev->pending &= ~mask;
        spin_unlock_irq(&ev->lock);
+       WARN_ON_ONCE(clearing & mask);
 
        return pending;
 }
 
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
 static void disk_events_workfn(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+       disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+                             unsigned int *clearing_ptr)
+{
        struct gendisk *disk = ev->disk;
        char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
-       unsigned int clearing = ev->clearing;
+       unsigned int clearing = *clearing_ptr;
        unsigned int events;
        unsigned long intv;
        int nr_events = 0, i;
@@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)
 
        events &= ~ev->pending;
        ev->pending |= events;
-       ev->clearing &= ~clearing;
+       *clearing_ptr &= ~clearing;
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
index 3ff2678..bd22f86 100644 (file)
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
                return acpi_rsdp;
 #endif
 
-       if (efi_enabled) {
+       if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
index 6a0955e..53ecac5 100644 (file)
@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE  
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE  
 
-typedef volatile u_int  freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int   rreg_t;
 
 typedef struct _ffredn_t {
-        freg_t  idlehead_high;  /* Idle cell header (high)              */
-        freg_t  idlehead_low;   /* Idle cell header (low)               */
-        freg_t  maxrate;        /* Maximum rate                         */
-        freg_t  stparms;        /* Traffic Management Parameters        */
-        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
-        freg_t  rm_type;        /*                                      */
-        u_int   filler5[0x17 - 0x06];
-        freg_t  cmd_reg;        /* Command register                     */
-        u_int   filler18[0x20 - 0x18];
-        freg_t  cbr_base;       /* CBR Pointer Base                     */
-        freg_t  vbr_base;       /* VBR Pointer Base                     */
-        freg_t  abr_base;       /* ABR Pointer Base                     */
-        freg_t  ubr_base;       /* UBR Pointer Base                     */
-        u_int   filler24;
-        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */
-        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */
-        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */
-        freg_t  vct_base;       /* Main VC Table Base                   */
-        freg_t  vcte_base;      /* Extended Main VC Table Base          */
-        u_int   filler2a[0x2C - 0x2A];
-        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */
-        freg_t  cbr_tab_end;    /* CBR Table End                        */
-        freg_t  cbr_pointer;    /* CBR Pointer                          */
-        u_int   filler2f[0x30 - 0x2F];
-        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */
-        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */
-        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */
-        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */
-        freg_t  tcq_st_adr;     /* Transmit Complete Queue Start Address*/
-        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */
-        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
-        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
-        u_int   filler38[0x40 - 0x38];
-        freg_t  queue_base;     /* Base address for PRQ and TCQ         */
-        freg_t  desc_base;      /* Base address of descriptor table     */
-        u_int   filler42[0x45 - 0x42];
-        freg_t  mode_reg_0;     /* Mode register 0                      */
-        freg_t  mode_reg_1;     /* Mode register 1                      */
-        freg_t  intr_status_reg;/* Interrupt Status register            */
-        freg_t  mask_reg;       /* Mask Register                        */
-        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */
-        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */
-        freg_t  state_reg;      /* Status register                      */
-        u_int   filler4c[0x58 - 0x4c];
-        freg_t  curr_desc_num;  /* Contains the current descriptor num  */
-        freg_t  next_desc;      /* Next descriptor                      */
-        freg_t  next_vc;        /* Next VC                              */
-        u_int   filler5b[0x5d - 0x5b];
-        freg_t  present_slot_cnt;/* Present slot count                  */
-        u_int   filler5e[0x6a - 0x5e];
-        freg_t  new_desc_num;   /* New descriptor number                */
-        freg_t  new_vc;         /* New VC                               */
-        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */
-        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */
-        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */
-        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */
-        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */
-        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */
-        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */
-        freg_t  cbr_vc;         /* CBR VC                               */
-        freg_t  vbr_sb_vc;      /* VBR SB VC                            */
-        freg_t  abr_sb_vc;      /* ABR SB VC                            */
-        freg_t  ubr_sb_vc;      /* UBR SB VC                            */
-        freg_t  vbr_next_link;  /* VBR next link                        */
-        freg_t  abr_next_link;  /* ABR next link                        */
-        freg_t  ubr_next_link;  /* UBR next link                        */
-        u_int   filler7a[0x7c-0x7a];
-        freg_t  out_rate_head;  /* Out of rate head                     */
-        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
-        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */
-        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
-        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
+       ffreg_t idlehead_high;  /* Idle cell header (high)              */
+       ffreg_t idlehead_low;   /* Idle cell header (low)               */
+       ffreg_t maxrate;        /* Maximum rate                         */
+       ffreg_t stparms;        /* Traffic Management Parameters        */
+       ffreg_t abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
+       ffreg_t rm_type;        /*                                      */
+       u_int   filler5[0x17 - 0x06];
+       ffreg_t cmd_reg;        /* Command register                     */
+       u_int   filler18[0x20 - 0x18];
+       ffreg_t cbr_base;       /* CBR Pointer Base                     */
+       ffreg_t vbr_base;       /* VBR Pointer Base                     */
+       ffreg_t abr_base;       /* ABR Pointer Base                     */
+       ffreg_t ubr_base;       /* UBR Pointer Base                     */
+       u_int   filler24;
+       ffreg_t vbrwq_base;     /* VBR Wait Queue Base                  */
+       ffreg_t abrwq_base;     /* ABR Wait Queue Base                  */
+       ffreg_t ubrwq_base;     /* UBR Wait Queue Base                  */
+       ffreg_t vct_base;       /* Main VC Table Base                   */
+       ffreg_t vcte_base;      /* Extended Main VC Table Base          */
+       u_int   filler2a[0x2C - 0x2A];
+       ffreg_t cbr_tab_beg;    /* CBR Table Begin                      */
+       ffreg_t cbr_tab_end;    /* CBR Table End                        */
+       ffreg_t cbr_pointer;    /* CBR Pointer                          */
+       u_int   filler2f[0x30 - 0x2F];
+       ffreg_t prq_st_adr;     /* Packet Ready Queue Start Address     */
+       ffreg_t prq_ed_adr;     /* Packet Ready Queue End Address       */
+       ffreg_t prq_rd_ptr;     /* Packet Ready Queue read pointer      */
+       ffreg_t prq_wr_ptr;     /* Packet Ready Queue write pointer     */
+       ffreg_t tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+       ffreg_t tcq_ed_adr;     /* Transmit Complete Queue End Address  */
+       ffreg_t tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+       ffreg_t tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
+       u_int   filler38[0x40 - 0x38];
+       ffreg_t queue_base;     /* Base address for PRQ and TCQ         */
+       ffreg_t desc_base;      /* Base address of descriptor table     */
+       u_int   filler42[0x45 - 0x42];
+       ffreg_t mode_reg_0;     /* Mode register 0                      */
+       ffreg_t mode_reg_1;     /* Mode register 1                      */
+       ffreg_t intr_status_reg;/* Interrupt Status register            */
+       ffreg_t mask_reg;       /* Mask Register                        */
+       ffreg_t cell_ctr_high1; /* Total cell transfer count (high)     */
+       ffreg_t cell_ctr_lo1;   /* Total cell transfer count (low)      */
+       ffreg_t state_reg;      /* Status register                      */
+       u_int   filler4c[0x58 - 0x4c];
+       ffreg_t curr_desc_num;  /* Contains the current descriptor num  */
+       ffreg_t next_desc;      /* Next descriptor                      */
+       ffreg_t next_vc;        /* Next VC                              */
+       u_int   filler5b[0x5d - 0x5b];
+       ffreg_t present_slot_cnt;/* Present slot count                  */
+       u_int   filler5e[0x6a - 0x5e];
+       ffreg_t new_desc_num;   /* New descriptor number                */
+       ffreg_t new_vc;         /* New VC                               */
+       ffreg_t sched_tbl_ptr;  /* Schedule table pointer               */
+       ffreg_t vbrwq_wptr;     /* VBR wait queue write pointer         */
+       ffreg_t vbrwq_rptr;     /* VBR wait queue read pointer          */
+       ffreg_t abrwq_wptr;     /* ABR wait queue write pointer         */
+       ffreg_t abrwq_rptr;     /* ABR wait queue read pointer          */
+       ffreg_t ubrwq_wptr;     /* UBR wait queue write pointer         */
+       ffreg_t ubrwq_rptr;     /* UBR wait queue read pointer          */
+       ffreg_t cbr_vc;         /* CBR VC                               */
+       ffreg_t vbr_sb_vc;      /* VBR SB VC                            */
+       ffreg_t abr_sb_vc;      /* ABR SB VC                            */
+       ffreg_t ubr_sb_vc;      /* UBR SB VC                            */
+       ffreg_t vbr_next_link;  /* VBR next link                        */
+       ffreg_t abr_next_link;  /* ABR next link                        */
+       ffreg_t ubr_next_link;  /* UBR next link                        */
+       u_int   filler7a[0x7c-0x7a];
+       ffreg_t out_rate_head;  /* Out of rate head                     */
+       u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
+       ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high)   */
+       ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
+       u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
 } ffredn_t;
 
 typedef struct _rfredn_t {
index 04f7c86..79595a0 100644 (file)
@@ -47,6 +47,7 @@ int bcma_sprom_get(struct bcma_bus *bus);
 /* driver_chipcommon.c */
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_pflash_dev;
 #endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_pmu.c */
@@ -96,11 +97,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
 #else
 static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
        return -ENOTSUPP;
 }
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+       return 0;
+}
 #endif /* CONFIG_BCMA_DRIVER_GPIO */
 
 #endif
index dbda91e..d4f699a 100644 (file)
@@ -5,11 +5,11 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include "bcma_private.h"
+
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-#include "bcma_private.h"
-
 struct platform_device bcma_nflash_dev = {
        .name           = "bcma_nflash",
        .num_resources  = 0,
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
        struct bcma_bus *bus = cc->core->bus;
 
        if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-           cc->core->id.rev != 0x38) {
+           cc->core->id.rev != 38) {
                bcma_err(bus, "NAND flash on unsupported board!\n");
                return -ENOTSUPP;
        }
index 1e694db..e6ed4fe 100644 (file)
@@ -5,11 +5,11 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include "bcma_private.h"
+
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-#include "bcma_private.h"
-
 static struct resource bcma_sflash_resource = {
        .name   = "bcma_sflash",
        .start  = BCMA_SOC_FLASH2,
index 9a6f585..45f0996 100644 (file)
@@ -73,6 +73,16 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
        bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
 }
 
+static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+       struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+       if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+               return bcma_core_irq(cc->core);
+       else
+               return -EINVAL;
+}
+
 int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
        struct gpio_chip *chip = &cc->gpio;
@@ -85,6 +95,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->set               = bcma_gpio_set_value;
        chip->direction_input   = bcma_gpio_direction_input;
        chip->direction_output  = bcma_gpio_direction_output;
+       chip->to_irq            = bcma_gpio_to_irq;
        chip->ngpio             = 16;
        /* There is just one SoC in one device and its GPIO addresses should be
         * deterministic to address them more easily. The other buses could get
@@ -96,3 +107,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
        return gpiochip_add(chip);
 }
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+       return gpiochip_remove(&cc->gpio);
+}
index 9fe86ee..9a7f0e3 100644 (file)
 
 #include <linux/bcma/bcma.h>
 
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 #include <linux/serial_reg.h>
 #include <linux/time.h>
 
+static const char *part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data bcma_pflash_data = {
+       .part_probe_types       = part_probes,
+};
+
+static struct resource bcma_pflash_resource = {
+       .name   = "bcma_pflash",
+       .flags  = IORESOURCE_MEM,
+};
+
+struct platform_device bcma_pflash_dev = {
+       .name           = "physmap-flash",
+       .dev            = {
+               .platform_data  = &bcma_pflash_data,
+       },
+       .resource       = &bcma_pflash_resource,
+       .num_resources  = 1,
+};
+
 /* The 47162a0 hangs when reading MIPS DMP registers registers */
 static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
 {
@@ -211,6 +233,7 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
 {
        struct bcma_bus *bus = mcore->core->bus;
        struct bcma_drv_cc *cc = &bus->drv_cc;
+       struct bcma_pflash *pflash = &cc->pflash;
 
        switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
        case BCMA_CC_FLASHT_STSER:
@@ -220,15 +243,20 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
                break;
        case BCMA_CC_FLASHT_PARA:
                bcma_debug(bus, "Found parallel flash\n");
-               cc->pflash.present = true;
-               cc->pflash.window = BCMA_SOC_FLASH2;
-               cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
+               pflash->present = true;
+               pflash->window = BCMA_SOC_FLASH2;
+               pflash->window_size = BCMA_SOC_FLASH2_SZ;
 
                if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
                     BCMA_CC_FLASH_CFG_DS) == 0)
-                       cc->pflash.buswidth = 1;
+                       pflash->buswidth = 1;
                else
-                       cc->pflash.buswidth = 2;
+                       pflash->buswidth = 2;
+
+               bcma_pflash_data.width = pflash->buswidth;
+               bcma_pflash_resource.start = pflash->window;
+               bcma_pflash_resource.end = pflash->window + pflash->window_size;
+
                break;
        default:
                bcma_err(bus, "Flash type not supported\n");
index ff85289..9a6188a 100644 (file)
@@ -149,6 +149,14 @@ static int bcma_register_cores(struct bcma_bus *bus)
                dev_id++;
        }
 
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+       if (bus->drv_cc.pflash.present) {
+               err = platform_device_register(&bcma_pflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering parallel flash\n");
+       }
+#endif
+
 #ifdef CONFIG_BCMA_SFLASH
        if (bus->drv_cc.sflash.present) {
                err = platform_device_register(&bcma_sflash_dev);
@@ -268,6 +276,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
        struct bcma_device *cores[3];
+       int err;
+
+       err = bcma_gpio_unregister(&bus->drv_cc);
+       if (err == -EBUSY)
+               bcma_err(bus, "Some GPIOs are still in use.\n");
+       else if (err)
+               bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
 
        cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
index f58a4a4..2b8303a 100644 (file)
@@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
 }
 
 /* must hold resource->req_lock */
-static void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_tconn *tconn)
 {
        /* no point closing an epoch, if it is empty, anyways. */
        if (tconn->current_tle_writes == 0)
index 016de6b..c08d229 100644 (file)
@@ -267,6 +267,7 @@ struct bio_and_error {
        int error;
 };
 
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
 extern void drbd_req_destroy(struct kref *kref);
 extern void _req_may_be_done(struct drbd_request *req,
                struct bio_and_error *m);
index 53bf618..0fe220c 100644 (file)
@@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
        enum drbd_state_rv rv = SS_SUCCESS;
        enum sanitize_state_warnings ssw;
        struct after_state_chg_work *ascw;
+       bool did_remote, should_do_remote;
 
        os = drbd_read_state(mdev);
 
@@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
            (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
                atomic_inc(&mdev->local_cnt);
 
+       did_remote = drbd_should_do_remote(mdev->state);
        mdev->state.i = ns.i;
+       should_do_remote = drbd_should_do_remote(mdev->state);
        mdev->tconn->susp = ns.susp;
        mdev->tconn->susp_nod = ns.susp_nod;
        mdev->tconn->susp_fen = ns.susp_fen;
 
+       /* put replicated vs not-replicated requests in seperate epochs */
+       if (did_remote != should_do_remote)
+               start_new_tl_epoch(mdev->tconn);
+
        if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
                drbd_print_uuids(mdev, "attached to UUIDs");
 
index 9694dd9..3fd1009 100644 (file)
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
                }
        }
 
-       if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+       if (cmdto_cnt) {
                print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-
-               mtip_restart_port(port);
+               if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+                       mtip_restart_port(port);
+                       wake_up_interruptible(&port->svc_wait);
+               }
                clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-               wake_up_interruptible(&port->svc_wait);
        }
 
        if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
         * Delete our gendisk structure. This also removes the device
         * from /dev
         */
-       del_gendisk(dd->disk);
+       if (dd->disk) {
+               if (dd->disk->queue)
+                       del_gendisk(dd->disk);
+               else
+                       put_disk(dd->disk);
+       }
 
        spin_lock(&rssd_index_lock);
        ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
                "Shutting down %s ...\n", dd->disk->disk_name);
 
        /* Delete our gendisk structure, and cleanup the blk queue. */
-       del_gendisk(dd->disk);
+       if (dd->disk) {
+               if (dd->disk->queue)
+                       del_gendisk(dd->disk);
+               else
+                       put_disk(dd->disk);
+       }
+
 
        spin_lock(&rssd_index_lock);
        ida_remove(&rssd_index_ida, dd->index);
index 74374fb..5ac841f 100644 (file)
@@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);
 
-#define foreach_grant(pos, rbtree, node) \
-       for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+#define foreach_grant_safe(pos, n, rbtree, node) \
+       for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+            (n) = rb_next(&(pos)->node); \
             &(pos)->node != NULL; \
-            (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+            (pos) = container_of(n, typeof(*(pos)), node), \
+            (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 
 
 static void add_persistent_gnt(struct rb_root *root,
@@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
+       struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;
 
-       foreach_grant(persistent_gnt, root, node) {
+       foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
                        persistent_gnt->handle);
 
                pages[segs_to_unmap] = persistent_gnt->page;
-               rb_erase(&persistent_gnt->node, root);
-               kfree(persistent_gnt);
-               num--;
 
                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
@@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
                        BUG_ON(ret);
                        segs_to_unmap = 0;
                }
+
+               rb_erase(&persistent_gnt->node, root);
+               kfree(persistent_gnt);
+               num--;
        }
        BUG_ON(num != 0);
 }
index 96e9b00..11043c1 100644 (file)
@@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 {
        struct llist_node *all_gnts;
        struct grant *persistent_gnt;
+       struct llist_node *n;
 
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&info->io_lock);
@@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
        /* Remove all persistent grants */
        if (info->persistent_gnts_c) {
                all_gnts = llist_del_all(&info->persistent_gnts);
-               llist_for_each_entry(persistent_gnt, all_gnts, node) {
+               llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
                        gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
                        __free_page(pfn_to_page(persistent_gnt->pfn));
                        kfree(persistent_gnt);
@@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                             struct blkif_response *bret)
 {
-       int i;
+       int i = 0;
        struct bio_vec *bvec;
        struct req_iterator iter;
        unsigned long flags;
@@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                 */
                rq_for_each_segment(bvec, s->request, iter) {
                        BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-                       i = offset >> PAGE_SHIFT;
+                       if (bvec->bv_offset < offset)
+                               i++;
                        BUG_ON(i >= s->req.u.rw.nr_segments);
                        shared_data = kmap_atomic(
                                pfn_to_page(s->grants_used[i]->pfn));
@@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                                bvec->bv_len);
                        bvec_kunmap_irq(bvec_data, &flags);
                        kunmap_atomic(shared_data);
-                       offset += bvec->bv_len;
+                       offset = bvec->bv_offset + bvec->bv_len;
                }
        }
        /* Add the persistent grant into the list of free grants */
index 684b0d5..ee4dbea 100644 (file)
@@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
        /* Disable interrupts for vqs */
        vdev->config->reset(vdev);
        /* Finish up work that's lined up */
-       cancel_work_sync(&portdev->control_work);
+       if (use_multiport(portdev))
+               cancel_work_sync(&portdev->control_work);
 
        list_for_each_entry_safe(port, port2, &portdev->ports, list)
                unplug_port(port);
index 7b69591..f1b7e24 100644 (file)
@@ -276,7 +276,7 @@ static int cn_init(void)
 
        cn_already_initialized = 1;
 
-       proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
+       proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);
 
        return 0;
 }
@@ -287,7 +287,7 @@ static void cn_fini(void)
 
        cn_already_initialized = 0;
 
-       proc_net_remove(&init_net, "connector");
+       remove_proc_entry("connector", init_net.proc_net);
 
        cn_queue_free_dev(dev->cbdev);
        netlink_kernel_release(dev->nls);
index 281f566..d1e9eb1 100644 (file)
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
        /*
         * Alocate and fill the csrow/channels structs
         */
-       mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+       mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
        if (!mci->csrows)
                goto error;
        for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
                csr->csrow_idx = row;
                csr->mci = mci;
                csr->nr_channels = tot_channels;
-               csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+               csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
                                        GFP_KERNEL);
                if (!csr->channels)
                        goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
        /*
         * Allocate and fill the dimm structs
         */
-       mci->dimms  = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+       mci->dimms  = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
        if (!mci->dimms)
                goto error;
 
index dc6e905..0056c4d 100644 (file)
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
        struct edac_pci_dev_attribute *edac_pci_dev;
        edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
 
-       if (edac_pci_dev->show)
+       if (edac_pci_dev->store)
                return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
        return -EIO;
 }
index fd3ae62..982f1f5 100644 (file)
@@ -471,7 +471,7 @@ void __init dmi_scan_machine(void)
        char __iomem *p, *q;
        int rc;
 
-       if (efi_enabled) {
+       if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.smbios == EFI_INVALID_TABLE_ADDR)
                        goto error;
 
index 7b1c374..f5596db 100644 (file)
@@ -674,7 +674,7 @@ static int efi_status_to_err(efi_status_t status)
                err = -EACCES;
                break;
        case EFI_NOT_FOUND:
-               err = -ENOENT;
+               err = -EIO;
                break;
        default:
                err = -EINVAL;
@@ -793,6 +793,7 @@ static ssize_t efivarfs_file_write(struct file *file,
                spin_unlock(&efivars->lock);
                efivar_unregister(var);
                drop_nlink(inode);
+               d_delete(file->f_dentry);
                dput(file->f_dentry);
 
        } else {
@@ -994,7 +995,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
                list_del(&var->list);
                spin_unlock(&efivars->lock);
                efivar_unregister(var);
-               drop_nlink(dir);
+               drop_nlink(dentry->d_inode);
                dput(dentry);
                return 0;
        }
@@ -1782,7 +1783,7 @@ efivars_init(void)
        printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
               EFIVARS_DATE);
 
-       if (!efi_enabled)
+       if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return 0;
 
        /* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1822,7 +1823,7 @@ err_put:
 static void __exit
 efivars_exit(void)
 {
-       if (efi_enabled) {
+       if (efi_enabled(EFI_RUNTIME_SERVICES)) {
                unregister_efivars(&__efivars);
                kobject_put(efi_kobj);
        }
index 4da4eb9..2224f1d 100644 (file)
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!efi_enabled)
+       if (!efi_enabled(EFI_BOOT))
                find_ibft_in_mem();
 
        if (ibft_addr) {
index 1d1f1e5..046bcda 100644 (file)
@@ -24,7 +24,7 @@ config DRM_EXYNOS_DMABUF
 
 config DRM_EXYNOS_FIMD
        bool "Exynos DRM FIMD"
-       depends on DRM_EXYNOS && !FB_S3C
+       depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
        help
          Choose this option if you want to use Exynos FIMD for DRM.
 
@@ -48,7 +48,7 @@ config DRM_EXYNOS_G2D
 
 config DRM_EXYNOS_IPP
        bool "Exynos DRM IPP"
-       depends on DRM_EXYNOS
+       depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
        help
          Choose this option if you want to use IPP feature for DRM.
 
index ab37437..4c5b685 100644 (file)
@@ -18,7 +18,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
 
-#define MAX_EDID 256
 #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
                                drm_connector)
 
@@ -96,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                                        to_exynos_connector(connector);
        struct exynos_drm_manager *manager = exynos_connector->manager;
        struct exynos_drm_display_ops *display_ops = manager->display_ops;
-       unsigned int count;
+       struct edid *edid = NULL;
+       unsigned int count = 0;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -114,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
         * because lcd panel has only one mode.
         */
        if (display_ops->get_edid) {
-               int ret;
-               void *edid;
-
-               edid = kzalloc(MAX_EDID, GFP_KERNEL);
-               if (!edid) {
-                       DRM_ERROR("failed to allocate edid\n");
-                       return 0;
+               edid = display_ops->get_edid(manager->dev, connector);
+               if (IS_ERR_OR_NULL(edid)) {
+                       ret = PTR_ERR(edid);
+                       edid = NULL;
+                       DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+                       goto out;
                }
 
-               ret = display_ops->get_edid(manager->dev, connector,
-                                               edid, MAX_EDID);
-               if (ret < 0) {
-                       DRM_ERROR("failed to get edid data.\n");
-                       kfree(edid);
-                       edid = NULL;
-                       return 0;
+               count = drm_add_edid_modes(connector, edid);
+               if (count < 0) {
+                       DRM_ERROR("Add edid modes failed %d\n", count);
+                       goto out;
                }
 
                drm_mode_connector_update_edid_property(connector, edid);
-               count = drm_add_edid_modes(connector, edid);
-               kfree(edid);
        } else {
                struct exynos_drm_panel_info *panel;
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -161,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                count = 1;
        }
 
+out:
+       kfree(edid);
        return count;
 }
 
index 9df9771..ba0a3aa 100644 (file)
@@ -19,6 +19,7 @@
 struct exynos_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
+       bool is_mapped;
 };
 
 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
@@ -72,17 +73,10 @@ static struct sg_table *
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-       if (WARN_ON(dir == DMA_NONE))
-               return ERR_PTR(-EINVAL);
-
        /* just return current sgt if already requested. */
-       if (exynos_attach->dir == dir)
+       if (exynos_attach->dir == dir && exynos_attach->is_mapped)
                return &exynos_attach->sgt;
 
-       /* reattaching is not allowed. */
-       if (WARN_ON(exynos_attach->dir != DMA_NONE))
-               return ERR_PTR(-EBUSY);
-
        buf = gem_obj->buffer;
        if (!buf) {
                DRM_ERROR("buffer is null.\n");
@@ -107,13 +101,17 @@ static struct sg_table *
                wr = sg_next(wr);
        }
 
-       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-       if (!nents) {
-               DRM_ERROR("failed to map sgl with iommu.\n");
-               sgt = ERR_PTR(-EIO);
-               goto err_unlock;
+       if (dir != DMA_NONE) {
+               nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+               if (!nents) {
+                       DRM_ERROR("failed to map sgl with iommu.\n");
+                       sg_free_table(sgt);
+                       sgt = ERR_PTR(-EIO);
+                       goto err_unlock;
+               }
        }
 
+       exynos_attach->is_mapped = true;
        exynos_attach->dir = dir;
        attach->priv = exynos_attach;
 
index b9e51bc..4606fac 100644 (file)
@@ -148,8 +148,8 @@ struct exynos_drm_overlay {
 struct exynos_drm_display_ops {
        enum exynos_drm_output_type type;
        bool (*is_connected)(struct device *dev);
-       int (*get_edid)(struct device *dev, struct drm_connector *connector,
-                               u8 *edid, int len);
+       struct edid *(*get_edid)(struct device *dev,
+                       struct drm_connector *connector);
        void *(*get_panel)(struct device *dev);
        int (*check_timing)(struct device *dev, void *timing);
        int (*power_on)(struct device *dev, int mode);
index 36c3905..9a4c08e 100644 (file)
@@ -324,7 +324,7 @@ out:
        g2d_userptr = NULL;
 }
 
-dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                                        unsigned long userptr,
                                        unsigned long size,
                                        struct drm_file *filp,
index 850e995..2864453 100644 (file)
@@ -108,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)
        return false;
 }
 
-static int drm_hdmi_get_edid(struct device *dev,
-               struct drm_connector *connector, u8 *edid, int len)
+static struct edid *drm_hdmi_get_edid(struct device *dev,
+                       struct drm_connector *connector)
 {
        struct drm_hdmi_context *ctx = to_context(dev);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        if (hdmi_ops && hdmi_ops->get_edid)
-               return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
-                                         len);
+               return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
 
-       return 0;
+       return NULL;
 }
 
 static int drm_hdmi_check_timing(struct device *dev, void *timing)
index 784a7e9..d80516f 100644 (file)
@@ -30,8 +30,8 @@ struct exynos_drm_hdmi_context {
 struct exynos_hdmi_ops {
        /* display */
        bool (*is_connected)(void *ctx);
-       int (*get_edid)(void *ctx, struct drm_connector *connector,
-                       u8 *edid, int len);
+       struct edid *(*get_edid)(void *ctx,
+                       struct drm_connector *connector);
        int (*check_timing)(void *ctx, void *timing);
        int (*power_on)(void *ctx, int mode);
 
index 0bda964..1a55635 100644 (file)
@@ -869,7 +869,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
        }
 }
 
-void ipp_handle_cmd_work(struct device *dev,
+static void ipp_handle_cmd_work(struct device *dev,
                struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_work *cmd_work,
                struct drm_exynos_ipp_cmd_node *c_node)
index e9e83ef..f976e29 100644 (file)
@@ -734,7 +734,7 @@ static int rotator_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct rot_limit_table rot_limit_tbl = {
+static struct rot_limit_table rot_limit_tbl = {
        .ycbcr420_2p = {
                .min_w = 32,
                .min_h = 32,
@@ -751,7 +751,7 @@ struct rot_limit_table rot_limit_tbl = {
        },
 };
 
-struct platform_device_id rotator_driver_ids[] = {
+static struct platform_device_id rotator_driver_ids[] = {
        {
                .name           = "exynos-rot",
                .driver_data    = (unsigned long)&rot_limit_tbl,
index d0ca3c4..13ccbd4 100644 (file)
@@ -98,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)
        return ctx->connected ? true : false;
 }
 
-static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
-                               u8 *edid, int len)
+static struct edid *vidi_get_edid(struct device *dev,
+                       struct drm_connector *connector)
 {
        struct vidi_context *ctx = get_vidi_context(dev);
+       struct edid *edid;
+       int edid_len;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -111,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
         */
        if (!ctx->raw_edid) {
                DRM_DEBUG_KMS("raw_edid is null.\n");
-               return -EFAULT;
+               return ERR_PTR(-EFAULT);
        }
 
-       memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
-                                       * EDID_LENGTH, len));
+       edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+       edid = kzalloc(edid_len, GFP_KERNEL);
+       if (!edid) {
+               DRM_DEBUG_KMS("failed to allocate edid\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
-       return 0;
+       memcpy(edid, ctx->raw_edid, edid_len);
+       return edid;
 }
 
 static void *vidi_get_panel(struct device *dev)
@@ -514,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
        struct exynos_drm_manager *manager;
        struct exynos_drm_display_ops *display_ops;
        struct drm_exynos_vidi_connection *vidi = data;
-       struct edid *raw_edid;
        int edid_len;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -551,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
        }
 
        if (vidi->connection) {
-               if (!vidi->edid) {
-                       DRM_DEBUG_KMS("edid data is null.\n");
+               struct edid *raw_edid  = (struct edid *)(uint32_t)vidi->edid;
+               if (!drm_edid_is_valid(raw_edid)) {
+                       DRM_DEBUG_KMS("edid data is invalid.\n");
                        return -EINVAL;
                }
-               raw_edid = (struct edid *)(uint32_t)vidi->edid;
                edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
                ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
                if (!ctx->raw_edid) {
index 41ff79d..fbab3c4 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
 #include <linux/of_gpio.h>
-#include <plat/gpio-cfg.h>
 
 #include <drm/exynos_drm.h>
 
@@ -98,8 +97,7 @@ struct hdmi_context {
 
        void __iomem                    *regs;
        void                            *parent_ctx;
-       int                             external_irq;
-       int                             internal_irq;
+       int                             irq;
 
        struct i2c_client               *ddc_port;
        struct i2c_client               *hdmiphy_port;
@@ -1391,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)
        return hdata->hpd;
 }
 
-static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
-                               u8 *edid, int len)
+static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
 {
        struct edid *raw_edid;
        struct hdmi_context *hdata = ctx;
@@ -1400,22 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
        if (!hdata->ddc_port)
-               return -ENODEV;
+               return ERR_PTR(-ENODEV);
 
        raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
-       if (raw_edid) {
-               hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
-               memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
-                                       * EDID_LENGTH, len));
-               DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
-                       (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
-                       raw_edid->width_cm, raw_edid->height_cm);
-               kfree(raw_edid);
-       } else {
-               return -ENODEV;
-       }
+       if (!raw_edid)
+               return ERR_PTR(-ENODEV);
 
-       return 0;
+       hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+       DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+               (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+               raw_edid->width_cm, raw_edid->height_cm);
+
+       return raw_edid;
 }
 
 static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
@@ -1652,16 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
        /* resetting HDMI core */
        hdmi_reg_writemask(hdata, reg,  0, HDMI_CORE_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
        hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
 }
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
        struct hdmi_infoframe infoframe;
 
-       /* disable HPD interrupts */
+       /* disable HPD interrupts from HDMI IP block, use GPIO instead */
        hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
                HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
 
@@ -1779,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
                u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
                if (val & HDMI_PHY_STATUS_READY)
                        break;
-               mdelay(1);
+               usleep_range(1000, 2000);
        }
        /* steady state not achieved */
        if (tries == 0) {
@@ -1946,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
                u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
                if (val & HDMI_PHY_STATUS_READY)
                        break;
-               mdelay(1);
+               usleep_range(1000, 2000);
        }
        /* steady state not achieved */
        if (tries == 0) {
@@ -1998,9 +1991,9 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 
        /* reset hdmiphy */
        hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
        hdmi_reg_writemask(hdata, reg,  0, HDMI_PHY_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
 }
 
 static void hdmiphy_poweron(struct hdmi_context *hdata)
@@ -2048,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
                return;
        }
 
-       mdelay(10);
+       usleep_range(10000, 12000);
 
        /* operation mode */
        operation[0] = 0x1f;
@@ -2170,6 +2163,13 @@ static void hdmi_commit(void *ctx)
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
+       mutex_lock(&hdata->hdmi_mutex);
+       if (!hdata->powered) {
+               mutex_unlock(&hdata->hdmi_mutex);
+               return;
+       }
+       mutex_unlock(&hdata->hdmi_mutex);
+
        hdmi_conf_apply(hdata);
 }
 
@@ -2265,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
        .dpms           = hdmi_dpms,
 };
 
-static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
 {
        struct exynos_drm_hdmi_context *ctx = arg;
        struct hdmi_context *hdata = ctx->ctx;
@@ -2280,31 +2280,6 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
-{
-       struct exynos_drm_hdmi_context *ctx = arg;
-       struct hdmi_context *hdata = ctx->ctx;
-       u32 intc_flag;
-
-       intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
-       /* clearing flags for HPD plug/unplug */
-       if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
-               DRM_DEBUG_KMS("unplugged\n");
-               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
-                       HDMI_INTC_FLAG_HPD_UNPLUG);
-       }
-       if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
-               DRM_DEBUG_KMS("plugged\n");
-               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
-                       HDMI_INTC_FLAG_HPD_PLUG);
-       }
-
-       if (ctx->drm_dev)
-               drm_helper_hpd_irq_event(ctx->drm_dev);
-
-       return IRQ_HANDLED;
-}
-
 static int hdmi_resources_init(struct hdmi_context *hdata)
 {
        struct device *dev = hdata->dev;
@@ -2555,39 +2530,24 @@ static int hdmi_probe(struct platform_device *pdev)
 
        hdata->hdmiphy_port = hdmi_hdmiphy;
 
-       hdata->external_irq = gpio_to_irq(hdata->hpd_gpio);
-       if (hdata->external_irq < 0) {
-               DRM_ERROR("failed to get GPIO external irq\n");
-               ret = hdata->external_irq;
-               goto err_hdmiphy;
-       }
-
-       hdata->internal_irq = platform_get_irq(pdev, 0);
-       if (hdata->internal_irq < 0) {
-               DRM_ERROR("failed to get platform internal irq\n");
-               ret = hdata->internal_irq;
+       hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+       if (hdata->irq < 0) {
+               DRM_ERROR("failed to get GPIO irq\n");
+               ret = hdata->irq;
                goto err_hdmiphy;
        }
 
        hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 
-       ret = request_threaded_irq(hdata->external_irq, NULL,
-                       hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+       ret = request_threaded_irq(hdata->irq, NULL,
+                       hdmi_irq_thread, IRQF_TRIGGER_RISING |
                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-                       "hdmi_external", drm_hdmi_ctx);
+                       "hdmi", drm_hdmi_ctx);
        if (ret) {
-               DRM_ERROR("failed to register hdmi external interrupt\n");
+               DRM_ERROR("failed to register hdmi interrupt\n");
                goto err_hdmiphy;
        }
 
-       ret = request_threaded_irq(hdata->internal_irq, NULL,
-                       hdmi_internal_irq_thread, IRQF_ONESHOT,
-                       "hdmi_internal", drm_hdmi_ctx);
-       if (ret) {
-               DRM_ERROR("failed to register hdmi internal interrupt\n");
-               goto err_free_irq;
-       }
-
        /* Attach HDMI Driver to common hdmi. */
        exynos_hdmi_drv_attach(drm_hdmi_ctx);
 
@@ -2598,8 +2558,6 @@ static int hdmi_probe(struct platform_device *pdev)
 
        return 0;
 
-err_free_irq:
-       free_irq(hdata->external_irq, drm_hdmi_ctx);
 err_hdmiphy:
        i2c_del_driver(&hdmiphy_driver);
 err_ddc:
@@ -2617,8 +2575,7 @@ static int hdmi_remove(struct platform_device *pdev)
 
        pm_runtime_disable(dev);
 
-       free_irq(hdata->internal_irq, hdata);
-       free_irq(hdata->external_irq, hdata);
+       free_irq(hdata->irq, hdata);
 
 
        /* hdmiphy i2c driver */
@@ -2637,8 +2594,7 @@ static int hdmi_suspend(struct device *dev)
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       disable_irq(hdata->internal_irq);
-       disable_irq(hdata->external_irq);
+       disable_irq(hdata->irq);
 
        hdata->hpd = false;
        if (ctx->drm_dev)
@@ -2663,8 +2619,7 @@ static int hdmi_resume(struct device *dev)
 
        hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 
-       enable_irq(hdata->external_irq);
-       enable_irq(hdata->internal_irq);
+       enable_irq(hdata->irq);
 
        if (!pm_runtime_suspended(dev)) {
                DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
index c187ea3..c414584 100644 (file)
@@ -600,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)
                /* waiting until VP_SRESET_PROCESSING is 0 */
                if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
                        break;
-               mdelay(10);
+               usleep_range(10000, 12000);
        }
        WARN(tries == 0, "failed to reset Video Processor\n");
 }
@@ -776,6 +776,13 @@ static void mixer_win_commit(void *ctx, int win)
 
        DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
+       mutex_lock(&mixer_ctx->mixer_mutex);
+       if (!mixer_ctx->powered) {
+               mutex_unlock(&mixer_ctx->mixer_mutex);
+               return;
+       }
+       mutex_unlock(&mixer_ctx->mixer_mutex);
+
        if (win > 1 && mixer_ctx->vp_enabled)
                vp_video_buffer(mixer_ctx, win);
        else
index 7944d30..9d4a2c2 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <generated/utsrelease.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -690,6 +691,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
+       seq_printf(m, "Kernel: " UTS_RELEASE);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "IER: 0x%08x\n", error->ier);
index b401788..59afb7e 100644 (file)
 #define MI_MODE                0x0209c
 # define VS_TIMER_DISPATCH                             (1 << 6)
 # define MI_FLUSH_ENABLE                               (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE                       (1 << 14)
 
 #define GEN6_GT_MODE   0x20d0
 #define   GEN6_GT_MODE_HI                              (1 << 9)
index ae253e0..42ff97d 100644 (file)
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);
 
-       if (INTEL_INFO(dev)->gen > 3) {
+       if (INTEL_INFO(dev)->gen > 3)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-               if (IS_GEN7(dev))
-                       I915_WRITE(GFX_MODE_GEN7,
-                                  _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-                                  _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-       }
+
+       /* We need to disable the AsyncFlip performance optimisations in order
+        * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+        * programmed to '1' on all products.
+        */
+       if (INTEL_INFO(dev)->gen >= 6)
+               I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+       /* Required for the hardware to program scanline values for waiting */
+       if (INTEL_INFO(dev)->gen == 6)
+               I915_WRITE(GFX_MODE,
+                          _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+       if (IS_GEN7(dev))
+               I915_WRITE(GFX_MODE_GEN7,
+                          _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+                          _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
index 6b0843c..e05c157 100644 (file)
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
        nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
 
        /* wait for 'uc halted' to be signalled before continuing */
-       if (falcon->secret) {
-               nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+       if (falcon->secret && falcon->version < 4) {
+               if (!falcon->version)
+                       nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+               else
+                       nv_wait(falcon, 0x180, 0x80000000, 0);
                nv_wo32(falcon, 0x004, 0x00000010);
        }
 
index f74c30a..48f0637 100644 (file)
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       mutex_init(&subdev->mutex);
+       __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
        subdev->name = subname;
 
        if (parent) {
index 5982935..106bb19 100644 (file)
@@ -50,10 +50,13 @@ int  nouveau_object_fini(struct nouveau_object *, bool suspend);
 
 extern struct nouveau_ofuncs nouveau_object_ofuncs;
 
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
 struct nouveau_oclass {
        u32 handle;
-       struct nouveau_ofuncs *ofuncs;
-       struct nouveau_omthds *omthds;
+       struct nouveau_ofuncs * const ofuncs;
+       struct nouveau_omthds * const omthds;
+       struct lock_class_key lock_class_key;
 };
 
 #define nv_oclass(o)    nv_object(o)->oclass
index d6d1600..d62045f 100644 (file)
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
                        return ret;
        }
 
-       if (!nouveau_mm_initialised(&pfb->tags) && tags) {
-               ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+       if (!nouveau_mm_initialised(&pfb->tags)) {
+               ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
                if (ret)
                        return ret;
        }
index 487cb8c..eac236e 100644 (file)
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
        struct nouveau_bios *bios = nouveau_bios(device);
        const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
        const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       u32 size;
+       u32 size, tags = 0;
        int ret;
 
        pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
                        return ret;
 
                pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+               tags = nv_rd32(pfb, 0x100320);
                break;
        }
 
-       return nv_rd32(pfb, 0x100320);
+       return tags;
 }
 
 static int
index 69d7b1d..1699a90 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <core/engine.h>
+#include <linux/swiotlb.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
index 8b090f1..5e7aef2 100644 (file)
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        return 0;
 }
 
+static struct lock_class_key drm_client_lock_class_key;
+
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
        if (ret)
                return ret;
+       lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
 
        dev->dev_private = drm;
        drm->dev = dev;
index 4d0e60a..a2d478e 100644 (file)
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
                                        radeon_wait_for_vblank(rdev, i);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
                                if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
                                        radeon_wait_for_vblank(rdev, i);
                                        tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        }
                        /* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                blackout &= ~BLACKOUT_MODE_MASK;
                WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
        }
+       /* wait for the MC to settle */
+       udelay(100);
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                        if (ASIC_IS_DCE6(rdev)) {
                                tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
                                tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
                                tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                        /* wait for the next frame */
                        frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
-       tmp = gb_addr_config & NUM_PIPES_MASK;
-       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
-                                       EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+       if ((rdev->config.evergreen.max_backends == 1) &&
+           (rdev->flags & RADEON_IS_IGP)) {
+               if ((disabled_rb_mask & 3) == 1) {
+                       /* RB0 disabled, RB1 enabled */
+                       tmp = 0x11111111;
+               } else {
+                       /* RB1 disabled, RB0 enabled */
+                       tmp = 0x00000000;
+               }
+       } else {
+               tmp = gb_addr_config & NUM_PIPES_MASK;
+               tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+                                               EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+       }
        WREG32(GB_BACKEND_MAP, tmp);
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
index 7a44566..ee4cff5 100644 (file)
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                return -EINVAL;
                        }
                        if (tiled) {
-                               dst_offset = ib[idx+1];
+                               dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 7;
                        } else {
-                               dst_offset = ib[idx+1];
-                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+                               dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                /* detile bit */
                                                if (idx_value & (1 << 31)) {
                                                        /* tiled src, linear dst */
-                                                       src_offset = ib[idx+1];
+                                                       src_offset = radeon_get_ib_value(p, idx+1);
                                                        src_offset <<= 8;
                                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                                       dst_offset = ib[idx+7];
-                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       dst_offset = radeon_get_ib_value(p, idx+7);
+                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                                } else {
                                                        /* linear src, tiled dst */
-                                                       src_offset = ib[idx+7];
-                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       src_offset = radeon_get_ib_value(p, idx+7);
+                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                                       dst_offset = ib[idx+1];
+                                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                                        dst_offset <<= 8;
                                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                                }
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                /* detile bit */
                                                if (idx_value & (1 << 31)) {
                                                        /* tiled src, linear dst */
-                                                       src_offset = ib[idx+1];
+                                                       src_offset = radeon_get_ib_value(p, idx+1);
                                                        src_offset <<= 8;
                                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                                       dst_offset = ib[idx+7];
-                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       dst_offset = radeon_get_ib_value(p, idx+7);
+                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                                } else {
                                                        /* linear src, tiled dst */
-                                                       src_offset = ib[idx+7];
-                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       src_offset = radeon_get_ib_value(p, idx+7);
+                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                                       dst_offset = ib[idx+1];
+                                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                                        dst_offset <<= 8;
                                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                                }
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        switch (misc) {
                                        case 0:
                                                /* L2L, byte */
-                                               src_offset = ib[idx+2];
-                                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                               dst_offset = ib[idx+1];
-                                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+2);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
+                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
                                                if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
                                                                 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
-                                               dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                               dst2_offset = ib[idx+2];
-                                               dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-                                               src_offset = ib[idx+3];
-                                               src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
+                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
+                                               dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+3);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        }
                                } else {
                                        /* L2L, dw */
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
                                        if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
                                                         src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
                                return -EINVAL;
                        }
-                       dst_offset = ib[idx+1];
-                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       dst_offset = radeon_get_ib_value(p, idx+1);
+                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
                                         dst_offset, radeon_bo_size(dst_reloc->robj));
index 59acabb..835992d 100644 (file)
@@ -1216,7 +1216,7 @@ void cayman_dma_stop(struct radeon_device *rdev)
 int cayman_dma_resume(struct radeon_device *rdev)
 {
        struct radeon_ring *ring;
-       u32 rb_cntl, dma_cntl;
+       u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;
@@ -1265,7 +1265,11 @@ int cayman_dma_resume(struct radeon_device *rdev)
                WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
 
                /* enable DMA IBs */
-               WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+               ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+               ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+               WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
 
                dma_cntl = RREG32(DMA_CNTL + reg_offset);
                dma_cntl &= ~CTXEMPTY_INT_ENABLE;
index 3cb9d60..becb03e 100644 (file)
@@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
                              u32 disabled_rb_mask)
 {
        u32 rendering_pipe_num, rb_num_width, req_rb_num;
-       u32 pipe_rb_ratio, pipe_rb_remain;
+       u32 pipe_rb_ratio, pipe_rb_remain, tmp;
        u32 data = 0, mask = 1 << (max_rb_num - 1);
        unsigned i, j;
 
        /* mask out the RBs that don't exist on that asic */
-       disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+       tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+       /* make sure at least one RB is available */
+       if ((tmp & 0xff) != 0xff)
+               disabled_rb_mask = tmp;
 
        rendering_pipe_num = 1 << tiling_pipe_num;
        req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -2313,7 +2316,7 @@ void r600_dma_stop(struct radeon_device *rdev)
 int r600_dma_resume(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
-       u32 rb_cntl, dma_cntl;
+       u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        int r;
 
@@ -2353,7 +2356,11 @@ int r600_dma_resume(struct radeon_device *rdev)
        WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
 
        /* enable DMA IBs */
-       WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+       ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+       ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+       WREG32(DMA_IB_CNTL, ib_cntl);
 
        dma_cntl = RREG32(DMA_CNTL);
        dma_cntl &= ~CTXEMPTY_INT_ENABLE;
index 69ec24a..9b2512b 100644 (file)
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                return -EINVAL;
                        }
                        if (tiled) {
-                               dst_offset = ib[idx+1];
+                               dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 5;
                        } else {
-                               dst_offset = ib[idx+1];
-                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+                               dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                /* detile bit */
                                if (idx_value & (1 << 31)) {
                                        /* tiled src, linear dst */
-                                       src_offset = ib[idx+1];
+                                       src_offset = radeon_get_ib_value(p, idx+1);
                                        src_offset <<= 8;
                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                       dst_offset = ib[idx+5];
-                                       dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+5);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                        ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
-                                       src_offset = ib[idx+5];
-                                       src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+5);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                        ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                       dst_offset = ib[idx+1];
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset <<= 8;
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                }
                                p->idx += 7;
                        } else {
                                if (p->family >= CHIP_RV770) {
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                        ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
                                        p->idx += 5;
                                } else {
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                DRM_ERROR("bad DMA_PACKET_WRITE\n");
                                return -EINVAL;
                        }
-                       dst_offset = ib[idx+1];
-                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       dst_offset = radeon_get_ib_value(p, idx+1);
+                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
                                         dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
index 9056faf..0b202c0 100644 (file)
@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
        .vm = {
                .init = &si_vm_init,
                .fini = &si_vm_fini,
-               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .set_page = &si_vm_set_page,
        },
        .ring = {
index 33a56a0..3e403bd 100644 (file)
@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                                                   1),
                                                                  ATOM_DEVICE_CRT1_SUPPORT);
                                }
+                               /* RV100 board with external TDMS bit mis-set.
+                                * Actually uses internal TMDS, clear the bit.
+                                */
+                               if (dev->pdev->device == 0x5159 &&
+                                   dev->pdev->subsystem_vendor == 0x1014 &&
+                                   dev->pdev->subsystem_device == 0x029A) {
+                                       tmp &= ~(1 << 4);
+                               }
                                if ((tmp >> 4) & 0x1) {
                                        devices |= ATOM_DEVICE_DFP2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
index 469661f..5407459 100644 (file)
@@ -286,6 +286,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                            p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
                                kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
                                kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+                               p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+                               p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
                                return -ENOMEM;
                        }
                }
index ad6df62..0d67674 100644 (file)
@@ -241,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       if (ASIC_IS_AVIVO(rdev)) {
+       /* fixed on DCE6 and newer */
+       if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
                int i = 0;
                struct drm_crtc *crtc_p;
 
index edfc54e..0d6562b 100644 (file)
@@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
-       if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+       if (efi_enabled(EFI_BOOT) &&
+           rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
                return false;
 
        /* first check CRTCs */
index 1da2386..05c96fa 100644 (file)
@@ -1115,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
        }
 
        radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-       if (radeon_fb == NULL)
+       if (radeon_fb == NULL) {
+               drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
+       }
 
        ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
        if (ret) {
                kfree(radeon_fb);
                drm_gem_object_unreference_unlocked(obj);
-               return NULL;
+               return ERR_PTR(ret);
        }
 
        return &radeon_fb->base;
index 2430d80..cd72062 100644 (file)
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 {
        int r;
 
+       /* make sure we aren't trying to allocate more space than there is on the ring */
+       if (ndw > (ring->ring_size / 4))
+               return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
index 1d8ff2f..93f760e 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 
index 0f656b1..a072fa8 100644 (file)
@@ -1,5 +1,6 @@
 cayman 0x9400
 0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
 0x000084FC CP_STRMOUT_CNTL
 0x000085F0 CP_COHER_CNTL
 0x000085F4 CP_COHER_SIZE
index 2bb6d0e..435ed35 100644 (file)
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
                                WREG32(R600_CITF_CNTL, blackout);
                }
        }
+       /* wait for the MC to settle */
+       udelay(100);
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
index 44420fc..8be35c8 100644 (file)
@@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
 
-       fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+       fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;
 
@@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);
 
-       fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       spin_lock(&bdev->fence_lock);
+       if (bo->sync_obj)
+               fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       else
+               fbo->sync_obj = NULL;
+       spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
@@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 */
 
                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-
-               /* ttm_buffer_object_transfer accesses bo->sync_obj */
-               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
 
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;
 
index 4dfa605..34e2547 100644 (file)
 #define USB_VENDOR_ID_EZKEY            0x0518
 #define USB_DEVICE_ID_BTC_8193         0x0002
 
+#define USB_VENDOR_ID_FORMOSA          0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER      0xe03e
+
 #define USB_VENDOR_ID_FREESCALE                0x15A2
 #define USB_DEVICE_ID_FREESCALE_MX28   0x004F
 
index 12e4fdc..e766b56 100644 (file)
@@ -540,13 +540,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
 {
        struct i2c_client *client = hid->driver_data;
        int report_id = buf[0];
+       int ret;
 
        if (report_type == HID_INPUT_REPORT)
                return -EINVAL;
 
-       return i2c_hid_set_report(client,
+       if (report_id) {
+               buf++;
+               count--;
+       }
+
+       ret = i2c_hid_set_report(client,
                                report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
                                report_id, buf, count);
+
+       if (report_id && ret >= 0)
+               ret++; /* add report_id to the number of transfered bytes */
+
+       return ret;
 }
 
 static int i2c_hid_parse(struct hid_device *hid)
index ac9e352..e0e6abf 100644 (file)
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
index 4850d03..3527509 100644 (file)
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
                struct qib_qp __rcu **qpp;
 
                qpp = &dev->qp_table[n];
-               q = rcu_dereference_protected(*qpp,
-                       lockdep_is_held(&dev->qpt_lock));
-               for (; q; qpp = &q->next) {
+               for (; (q = rcu_dereference_protected(*qpp,
+                               lockdep_is_held(&dev->qpt_lock))) != NULL;
+                               qpp = &q->next)
                        if (q == qp) {
                                atomic_dec(&qp->refcount);
                                *qpp = qp->next;
                                rcu_assign_pointer(qp->next, NULL);
-                               q = rcu_dereference_protected(*qpp,
-                                       lockdep_is_held(&dev->qpt_lock));
                                break;
                        }
-                       q = rcu_dereference_protected(*qpp,
-                               lockdep_is_held(&dev->qpt_lock));
-               }
        }
 
        spin_unlock_irqrestore(&dev->qpt_lock, flags);
index 03103d2..67b0c1d 100644 (file)
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 
        tx_req->mapping = addr;
 
+       skb_orphan(skb);
+       skb_dst_drop(skb);
+
        rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                       addr, skb->len);
        if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                dev->trans_start = jiffies;
                ++tx->tx_head;
 
-               skb_orphan(skb);
-               skb_dst_drop(skb);
-
                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
index a1bca70..2cfa76f 100644 (file)
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                netif_stop_queue(dev);
        }
 
+       skb_orphan(skb);
+       skb_dst_drop(skb);
+
        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
                address->last_send = priv->tx_head;
                ++priv->tx_head;
-
-               skb_orphan(skb);
-               skb_dst_drop(skb);
        }
 
        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
index 358cd7e..7cd74e2 100644 (file)
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x, y)    ((x) - (y))
 #define TIME_NAME      "TSC"
index b9d0911..eca2801 100644 (file)
@@ -4234,6 +4234,21 @@ static struct iommu_ops intel_iommu_ops = {
        .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
 };
 
+static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+{
+       /* G4x/GM45 integrated gfx dmar support is totally busted. */
+       printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+       dmar_map_gfx = 0;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
        /*
@@ -4242,12 +4257,6 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
         */
        printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
-
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
-       if (dev->revision == 0x07) {
-               printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
-               dmar_map_gfx = 0;
-       }
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
index 675ae52..5409607 100644 (file)
@@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
        return 0;
 }
 
-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
-       struct thin_c *tc = ti->private;
-
-       *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 6, 0},
+       .version = {1, 7, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
@@ -2767,7 +2757,6 @@ static struct target_type thin_target = {
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .iterate_devices = thin_iterate_devices,
-       .io_hints = thin_io_hints,
 };
 
 /*----------------------------------------------------------------*/
index c72e4d5..314a0e2 100644 (file)
@@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
 {
        struct dm_target *ti;
        sector_t len;
+       unsigned num_requests;
 
        do {
                ti = dm_table_find_target(ci->map, ci->sector);
@@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
                 * reconfiguration might also have changed that since the
                 * check was performed.
                 */
-               if (!get_num_requests || !get_num_requests(ti))
+               num_requests = get_num_requests ? get_num_requests(ti) : 0;
+               if (!num_requests)
                        return -EOPNOTSUPP;
 
                if (is_split_required && !is_split_required(ti))
@@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
                else
                        len = min(ci->sector_count, max_io_len(ci->sector, ti));
 
-               __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+               __issue_target_requests(ci, ti, num_requests, len);
 
                ci->sector += len;
        } while (ci->sector_count -= len);
index e10e525..296941a 100644 (file)
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
        radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
        radio->vdev.lock = &radio->lock;
        radio->vdev.release = video_device_release_empty;
+       radio->vdev.vfl_dir = VFL_DIR_TX;
 
        radio->usbdev = interface_to_usbdev(intf);
        radio->intf = intf;
index a082e40..1507c9d 100644 (file)
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
        .name                   = "radio-si4713",
        .release                = video_device_release,
        .ioctl_ops              = &radio_si4713_ioctl_ops,
+       .vfl_dir                = VFL_DIR_TX,
 };
 
 /* Platform driver interface */
index c48be19..cabbe3a 100644 (file)
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
        .ioctl_ops              = &wl1273_ioctl_ops,
        .name                   = WL1273_FM_DRIVER_NAME,
        .release                = wl1273_vdev_release,
+       .vfl_dir                = VFL_DIR_TX,
 };
 
 static int wl1273_fm_radio_remove(struct platform_device *pdev)
index 048de45..0a8ee8f 100644 (file)
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
        .ioctl_ops = &fm_drv_ioctl_ops,
        .name = FM_DRV_NAME,
        .release = video_device_release,
+       /*
+        * To ensure both the tuner and modulator ioctls are accessible we
+        * set the vfl_dir to M2M to indicate this.
+        *
+        * It is not really a mem2mem device of course, but it can both receive
+        * and transmit using the same radio device. It's the only radio driver
+        * that does this and it should really be split in two radio devices,
+        * but that would affect applications using this driver.
+        */
+       .vfl_dir = VFL_DIR_M2M,
 };
 
 int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
index 27f80cd..46dcb54 100644 (file)
@@ -272,6 +272,7 @@ config MTD_DOCG3
        tristate "M-Systems Disk-On-Chip G3"
        select BCH
        select BCH_CONST_PARAMS
+       select BITREVERSE
        ---help---
          This provides an MTD device driver for the M-Systems DiskOnChip
          G3 devices.
index 67cc73c..7901d72 100644 (file)
@@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
        resource_size_t res_size;
        struct mtd_part_parser_data ppdata;
        bool map_indirect;
-       const char *mtd_name;
+       const char *mtd_name = NULL;
 
        match = of_match_device(of_flash_match, &dev->dev);
        if (!match)
index 86c9a79..595de40 100644 (file)
@@ -17,8 +17,8 @@
 #include "bcm47xxnflash.h"
 
 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown 164 retries as maxiumum. */
-#define NFLASH_READY_RETRIES           1000
+ * shown ~1000 retries as maxiumum. */
+#define NFLASH_READY_RETRIES           10000
 
 #define NFLASH_SECTOR_SIZE             512
 
index 3502606..feae55c 100644 (file)
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
 static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
 
 static struct davinci_nand_pdata
index 8323ac9..3766682 100644 (file)
@@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
        int i;
        int val;
 
-       /* ONFI need to be probed in 8 bits mode */
-       WARN_ON(chip->options & NAND_BUSWIDTH_16);
+       /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
+       if (chip->options & NAND_BUSWIDTH_16) {
+               pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+               return 0;
+       }
        /* Try ONFI for unknown chip or LP */
        chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
        if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
index 2239937..94c1534 100644 (file)
@@ -1249,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
                return;
 
        slave->np = NULL;
-       __netpoll_free_rcu(np);
+       __netpoll_free_async(np);
 }
 static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
 {
index 1877ed7..1c9e09f 100644 (file)
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
                pr_info("%s: Setting primary slave to None.\n",
                        bond->dev->name);
                bond->primary_slave = NULL;
+               memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
                goto out;
        }
index 285f763..a668cd4 100644 (file)
@@ -491,8 +491,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
        priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
                        IFX_WRITE_LOW_16BIT(mask));
+
+       /* According to C_CAN documentation, the reserved bit
+        * in IFx_MASK2 register is fixed 1
+        */
        priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-                       IFX_WRITE_HIGH_16BIT(mask));
+                       IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
        priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
                        IFX_WRITE_LOW_16BIT(id));
index 0e7bde7..5f9a7ad 100644 (file)
@@ -1019,10 +1019,8 @@ static int ems_usb_probe(struct usb_interface *intf,
 
        dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
                                     sizeof(struct ems_cpc_msg), GFP_KERNEL);
-       if (!dev->tx_msg_buffer) {
-               dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
+       if (!dev->tx_msg_buffer)
                goto cleanup_intr_in_buffer;
-       }
 
        usb_set_intfdata(intf, dev);
 
index 74cfc01..797f847 100644 (file)
@@ -494,19 +494,15 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
        }
        memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
 
-       new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
-                               GFP_ATOMIC);
-       if (!new_dma_addr_list) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
+                                   GFP_ATOMIC);
+       if (!new_dma_addr_list)
                goto free_new_tx_ring;
-       }
 
-       new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
-                               GFP_ATOMIC);
-       if (!new_skb_list) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+                              GFP_ATOMIC);
+       if (!new_skb_list)
                goto free_new_lists;
-       }
 
        kfree(lp->tx_skbuff);
        kfree(lp->tx_dma_addr);
@@ -564,19 +560,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
        }
        memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
 
-       new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
-                               GFP_ATOMIC);
-       if (!new_dma_addr_list) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+       if (!new_dma_addr_list)
                goto free_new_rx_ring;
-       }
 
-       new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
-                               GFP_ATOMIC);
-       if (!new_skb_list) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+                              GFP_ATOMIC);
+       if (!new_skb_list)
                goto free_new_lists;
-       }
 
        /* first copy the current receive buffers */
        overlap = min(size, lp->rx_ring_size);
@@ -1933,31 +1924,23 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 
        lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
                                  GFP_ATOMIC);
-       if (!lp->tx_dma_addr) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       if (!lp->tx_dma_addr)
                return -ENOMEM;
-       }
 
        lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
                                  GFP_ATOMIC);
-       if (!lp->rx_dma_addr) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       if (!lp->rx_dma_addr)
                return -ENOMEM;
-       }
 
        lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
                                GFP_ATOMIC);
-       if (!lp->tx_skbuff) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       if (!lp->tx_skbuff)
                return -ENOMEM;
-       }
 
        lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
                                GFP_ATOMIC);
-       if (!lp->rx_skbuff) {
-               netif_err(lp, drv, dev, "Memory allocation failed\n");
+       if (!lp->rx_skbuff)
                return -ENOMEM;
-       }
 
        return 0;
 }
index 571b514..8f33315 100644 (file)
@@ -21,7 +21,7 @@
 
 #include "atl1c.h"
 
-#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
 
@@ -1649,6 +1649,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
        u16 num_alloc = 0;
        u16 rfd_next_to_use, next_next;
        struct atl1c_rx_free_desc *rfd_desc;
+       dma_addr_t mapping;
 
        next_next = rfd_next_to_use = rfd_ring->next_to_use;
        if (++next_next == rfd_ring->count)
@@ -1675,9 +1676,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
                ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
-               buffer_info->dma = pci_map_single(pdev, vir_addr,
+               mapping = pci_map_single(pdev, vir_addr,
                                                buffer_info->length,
                                                PCI_DMA_FROMDEVICE);
+               if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+                       dev_kfree_skb(skb);
+                       buffer_info->skb = NULL;
+                       buffer_info->length = 0;
+                       ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+                       netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+                       break;
+               }
+               buffer_info->dma = mapping;
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
                        ATL1C_PCIMAP_FROMDEVICE);
                rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2012,7 +2022,29 @@ check_sum:
        return 0;
 }
 
-static void atl1c_tx_map(struct atl1c_adapter *adapter,
+static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
+                             struct atl1c_tpd_desc *first_tpd,
+                             enum atl1c_trans_queue type)
+{
+       struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+       struct atl1c_buffer *buffer_info;
+       struct atl1c_tpd_desc *tpd;
+       u16 first_index, index;
+
+       first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
+       index = first_index;
+       while (index != tpd_ring->next_to_use) {
+               tpd = ATL1C_TPD_DESC(tpd_ring, index);
+               buffer_info = &tpd_ring->buffer_info[index];
+               atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+               memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
+               if (++index == tpd_ring->count)
+                       index = 0;
+       }
+       tpd_ring->next_to_use = first_index;
+}
+
+static int atl1c_tx_map(struct atl1c_adapter *adapter,
                      struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
                        enum atl1c_trans_queue type)
 {
@@ -2037,7 +2069,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
                buffer_info->length = map_len;
                buffer_info->dma = pci_map_single(adapter->pdev,
                                        skb->data, hdr_len, PCI_DMA_TODEVICE);
-               ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+               if (unlikely(pci_dma_mapping_error(adapter->pdev,
+                                                  buffer_info->dma)))
+                       goto err_dma;
+
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
                        ATL1C_PCIMAP_TODEVICE);
                mapped_len += map_len;
@@ -2059,6 +2094,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
                buffer_info->dma =
                        pci_map_single(adapter->pdev, skb->data + mapped_len,
                                        buffer_info->length, PCI_DMA_TODEVICE);
+               if (unlikely(pci_dma_mapping_error(adapter->pdev,
+                                                  buffer_info->dma)))
+                       goto err_dma;
+
                ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
                        ATL1C_PCIMAP_TODEVICE);
@@ -2080,6 +2119,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
                                                    frag, 0,
                                                    buffer_info->length,
                                                    DMA_TO_DEVICE);
+               if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+                       goto err_dma;
+
                ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
                        ATL1C_PCIMAP_TODEVICE);
@@ -2092,6 +2134,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
        /* The last buffer info contain the skb address,
           so it will be free after unmap */
        buffer_info->skb = skb;
+
+       return 0;
+
+err_dma:
+       buffer_info->dma = 0;
+       buffer_info->length = 0;
+       return -1;
 }
 
 static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2154,10 +2203,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
        if (skb_network_offset(skb) != ETH_HLEN)
                tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
 
-       atl1c_tx_map(adapter, skb, tpd, type);
-       atl1c_tx_queue(adapter, skb, tpd, type);
+       if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+               netif_info(adapter, tx_done, adapter->netdev,
+                          "tx-skb droppted due to dma error\n");
+               /* roll back tpd/buffer */
+               atl1c_tx_rollback(adapter, tpd, type);
+               spin_unlock_irqrestore(&adapter->tx_lock, flags);
+               dev_kfree_skb(skb);
+       } else {
+               atl1c_tx_queue(adapter, skb, tpd, type);
+               spin_unlock_irqrestore(&adapter->tx_lock, flags);
+       }
 
-       spin_unlock_irqrestore(&adapter->tx_lock, flags);
        return NETDEV_TX_OK;
 }
 
index 9bd33db..3fd3288 100644 (file)
@@ -301,7 +301,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                        bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
                                  ring->start);
                } else {
-                       new_skb = netdev_alloc_skb(bgmac->net_dev, len);
+                       new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
                        if (new_skb) {
                                skb_put(new_skb, len);
                                skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
@@ -535,7 +535,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
  * PHY ops
  **************************************************/
 
-u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
+static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
 {
        struct bcma_device *core;
        u16 phy_access_addr;
@@ -584,7 +584,7 @@ u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
+static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
 {
        struct bcma_device *core;
        u16 phy_access_addr;
@@ -617,9 +617,13 @@ void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
        tmp |= value;
        bcma_write32(core, phy_access_addr, tmp);
 
-       if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000))
+       if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
                bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
                          phyaddr, reg);
+               return -ETIMEDOUT;
+       }
+
+       return 0;
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
@@ -761,6 +765,26 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
        udelay(2);
 }
 
+static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+{
+       u32 tmp;
+
+       tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+       bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+       tmp = (addr[4] << 8) | addr[5];
+       bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+}
+
+static void bgmac_set_rx_mode(struct net_device *net_dev)
+{
+       struct bgmac *bgmac = netdev_priv(net_dev);
+
+       if (net_dev->flags & IFF_PROMISC)
+               bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+       else
+               bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+}
+
 #if 0 /* We don't use that regs yet */
 static void bgmac_chip_stats_update(struct bgmac *bgmac)
 {
@@ -889,8 +913,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
                        sw_type = et_swtype;
                } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
                        sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
-               } else if (0) {
-                       /* TODO */
+               } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
+                          (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+                       sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
+                                 BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
                }
                bcma_chipco_chipctl_maskset(cc, 1,
                                            ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
@@ -948,6 +974,7 @@ static void bgmac_chip_intrs_on(struct bgmac *bgmac)
 static void bgmac_chip_intrs_off(struct bgmac *bgmac)
 {
        bgmac_write(bgmac, BGMAC_INT_MASK, 0);
+       bgmac_read(bgmac, BGMAC_INT_MASK);
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
@@ -1004,8 +1031,6 @@ static void bgmac_enable(struct bgmac *bgmac)
 static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
 {
        struct bgmac_dma_ring *ring;
-       u8 *mac = bgmac->net_dev->dev_addr;
-       u32 tmp;
        int i;
 
        /* 1 interrupt per received frame */
@@ -1014,21 +1039,14 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
        /* Enable 802.3x tx flow control (honor received PAUSE frames) */
        bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
 
-       if (bgmac->net_dev->flags & IFF_PROMISC)
-               bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, false);
-       else
-               bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, false);
+       bgmac_set_rx_mode(bgmac->net_dev);
 
-       /* Set MAC addr */
-       tmp = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
-       bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
-       tmp = (mac[4] << 8) | mac[5];
-       bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+       bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
 
        if (bgmac->loopback)
-               bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, true);
+               bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
        else
-               bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, true);
+               bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
 
        bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
 
@@ -1160,6 +1178,19 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
        return bgmac_dma_tx_add(bgmac, ring, skb);
 }
 
+static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
+{
+       struct bgmac *bgmac = netdev_priv(net_dev);
+       int ret;
+
+       ret = eth_prepare_mac_addr_change(net_dev, addr);
+       if (ret < 0)
+               return ret;
+       bgmac_write_mac_address(bgmac, (u8 *)addr);
+       eth_commit_mac_addr_change(net_dev, addr);
+       return 0;
+}
+
 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 {
        struct bgmac *bgmac = netdev_priv(net_dev);
@@ -1190,7 +1221,9 @@ static const struct net_device_ops bgmac_netdev_ops = {
        .ndo_open               = bgmac_open,
        .ndo_stop               = bgmac_stop,
        .ndo_start_xmit         = bgmac_start_xmit,
-       .ndo_set_mac_address    = eth_mac_addr, /* generic, sets dev_addr */
+       .ndo_set_rx_mode        = bgmac_set_rx_mode,
+       .ndo_set_mac_address    = bgmac_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = bgmac_ioctl,
 };
 
@@ -1290,6 +1323,12 @@ static int bgmac_probe(struct bcma_device *core)
                return -ENOTSUPP;
        }
 
+       if (!is_valid_ether_addr(mac)) {
+               dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
+               eth_random_addr(mac);
+               dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+       }
+
        /* Allocation and references */
        net_dev = alloc_etherdev(sizeof(*bgmac));
        if (!net_dev)
index 1299470..4ede614 100644 (file)
 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHY           0x00000000
 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII                0x00000040
 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII       0x00000080
-#define BGMAC_CHIPCTL_1_SW_TYPE_RGMI           0x000000C0
+#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII          0x000000C0
 #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS         0x00010000
 
 #define BGMAC_SPEED_10                         0x0001
@@ -450,7 +450,4 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
        bgmac_maskset(bgmac, offset, ~0, set);
 }
 
-u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg);
-void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value);
-
 #endif /* _BGMAC_H */
index 90195e3..fdb9b56 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/prefetch.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
+#include <linux/ssb/ssb_driver_gige.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
@@ -93,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    129
+#define TG3_MIN_NUM                    130
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "January 06, 2013"
+#define DRV_MODULE_RELDATE     "February 14, 2013"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -263,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
@@ -330,6 +332,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
@@ -573,7 +576,9 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
 {
        tp->write32_mbox(tp, off, val);
-       if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+       if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
+           (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
+            !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
 }
 
@@ -583,7 +588,8 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
-       if (tg3_flag(tp, MBOX_WRITE_REORDER))
+       if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
+           tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
 }
 
@@ -612,7 +618,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
        unsigned long flags;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;
 
@@ -637,7 +643,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
        unsigned long flags;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
@@ -665,7 +671,7 @@ static void tg3_ape_lock_init(struct tg3 *tp)
        int i;
        u32 regbase, bit;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+       if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;
@@ -701,7 +707,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 
        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+               if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
@@ -720,7 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
                return -EINVAL;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
@@ -758,7 +764,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 
        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+               if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
@@ -777,7 +783,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
                return;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+       if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;
@@ -1091,7 +1097,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
 
 #define PHY_BUSY_LOOPS 5000
 
-static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
+                        u32 *val)
 {
        u32 frame_val;
        unsigned int loops;
@@ -1107,7 +1114,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
 
        *val = 0x0;
 
-       frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+       frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
@@ -1144,7 +1151,13 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
        return ret;
 }
 
-static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+       return __tg3_readphy(tp, tp->phy_addr, reg, val);
+}
+
+static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
+                         u32 val)
 {
        u32 frame_val;
        unsigned int loops;
@@ -1162,7 +1175,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
 
        tg3_ape_lock(tp, tp->phy_ape_lock);
 
-       frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+       frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
@@ -1197,6 +1210,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
        return ret;
 }
 
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+       return __tg3_writephy(tp, tp->phy_addr, reg, val);
+}
+
 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
 {
        int err;
@@ -1461,7 +1479,7 @@ static void tg3_mdio_start(struct tg3 *tp)
        udelay(80);
 
        if (tg3_flag(tp, MDIOBUS_INITED) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+           tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
 }
 
@@ -1476,7 +1494,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 
                tp->phy_addr = tp->pci_fn + 1;
 
-               if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+               if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
@@ -1564,7 +1582,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 
        tg3_flag_set(tp, MDIOBUS_INITED);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+       if (tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
 
        return 0;
@@ -1781,7 +1799,12 @@ static int tg3_poll_fw(struct tg3 *tp)
        int i;
        u32 val;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_flag(tp, IS_SSB_CORE)) {
+               /* We don't use firmware. */
+               return 0;
+       }
+
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
@@ -1810,7 +1833,7 @@ static int tg3_poll_fw(struct tg3 *tp)
                netdev_info(tp->dev, "No firmware running\n");
        }
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
@@ -1940,7 +1963,7 @@ static void tg3_adjust_link(struct net_device *dev)
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+                        tg3_asic_rev(tp) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -1967,7 +1990,7 @@ static void tg3_adjust_link(struct net_device *dev)
                udelay(40);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
@@ -2159,7 +2182,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+       if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 
        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
@@ -2314,8 +2337,8 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
        u32 val;
 
        if (tp->link_config.active_speed == SPEED_1000 &&
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+           (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+            tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_flag(tp, 57765_CLASS)) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
@@ -2519,7 +2542,7 @@ static int tg3_phy_reset(struct tg3 *tp)
        u32 val, cpmuctrl;
        int err;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
@@ -2534,9 +2557,9 @@ static int tg3_phy_reset(struct tg3 *tp)
                tg3_link_report(tp);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+           tg3_asic_rev(tp) == ASIC_REV_5704 ||
+           tg3_asic_rev(tp) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
@@ -2544,8 +2567,8 @@ static int tg3_phy_reset(struct tg3 *tp)
        }
 
        cpmuctrl = 0;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
-           GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+           tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
@@ -2563,8 +2586,8 @@ static int tg3_phy_reset(struct tg3 *tp)
                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }
 
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
-           GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+       if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+           tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2642,12 +2665,12 @@ out:
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
                tg3_phydsp_write(tp, 0xffb, 0x4000);
 
        tg3_phy_toggle_automdix(tp, 1);
@@ -2675,8 +2698,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
 {
        u32 status, shift;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719)
                status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
        else
                status = tr32(TG3_CPMU_DRV_STATUS);
@@ -2685,8 +2708,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
        status &= ~(TG3_GPIO_MSG_MASK << shift);
        status |= (newstat << shift);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719)
                tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
        else
                tw32(TG3_CPMU_DRV_STATUS, status);
@@ -2699,9 +2722,9 @@ static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
        if (!tg3_flag(tp, IS_NIC))
                return 0;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                        return -EIO;
 
@@ -2724,8 +2747,8 @@ static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
        u32 grc_local_ctrl;
 
        if (!tg3_flag(tp, IS_NIC) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+           tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           tg3_asic_rev(tp) == ASIC_REV_5701)
                return;
 
        grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
@@ -2748,8 +2771,8 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
        if (!tg3_flag(tp, IS_NIC))
                return;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           tg3_asic_rev(tp) == ASIC_REV_5701) {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
@@ -2781,7 +2804,7 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
                u32 grc_local_ctrl = 0;
 
                /* Workaround to prevent overdrawing Amps. */
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
@@ -2853,9 +2876,9 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
        if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720) {
                tg3_frob_aux_power_5717(tp, include_wol ?
                                        tg3_flag(tp, WOL_ENABLE) != 0 : 0);
                return;
@@ -2907,7 +2930,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
        u32 val;
 
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);
 
@@ -2919,7 +2942,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
                return;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
@@ -2958,16 +2981,16 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           tg3_asic_rev(tp) == ASIC_REV_5704 ||
+           (tg3_asic_rev(tp) == ASIC_REV_5780 &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+           (tg3_asic_rev(tp) == ASIC_REV_5717 &&
             !tp->pci_fn))
                return;
 
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
-           GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+       if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+           tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
@@ -3350,7 +3373,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                    !tg3_flag(tp, 57765_PLUS))
                        tw32(NVRAM_ADDR, phy_addr);
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+               if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
                    !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -3435,7 +3458,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 
        BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);
 
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -3453,6 +3476,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
+               /*
+                * There is only an Rx CPU for the 5750 derivative in the
+                * BCM4785.
+                */
+               if (tg3_flag(tp, IS_SSB_CORE))
+                       return 0;
+
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
@@ -3606,7 +3636,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
@@ -3664,8 +3694,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
                tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+           tg3_asic_rev(tp) == ASIC_REV_5704) {
                for (i = 0; i < 12; i++) {
                        tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
                        tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
@@ -3784,7 +3814,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                        tg3_setup_phy(tp, 0);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                u32 val;
 
                val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3826,8 +3856,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                                mac_mode = MAC_MODE_PORT_MODE_MII;
 
                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-                           ASIC_REV_5700) {
+                       if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
@@ -3860,8 +3889,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
        }
 
        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+           (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+            tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 base_val;
 
                base_val = tp->pci_clock_ctrl;
@@ -3872,13 +3901,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
-                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                  tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
@@ -3900,8 +3929,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;
 
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+                       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+                           tg3_asic_rev(tp) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
@@ -3920,8 +3949,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
        tg3_frob_aux_power(tp, true);
 
        /* Workaround for unstable PLL clock */
-       if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
-           (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+       if ((!tg3_flag(tp, IS_SSB_CORE)) &&
+           ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
+            (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
                u32 val = tr32(0x7d00);
 
                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
@@ -4012,8 +4042,8 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+                   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
                        new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
                err = tg3_writephy(tp, MII_CTRL1000, new_adv);
@@ -4042,7 +4072,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                if (err)
                        val = 0;
 
-               switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+               switch (tg3_asic_rev(tp)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                case ASIC_REV_57766:
@@ -4190,8 +4220,8 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
                        return false;
 
                if (tgtadv &&
-                   (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
+                   (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
                        tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
                        tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
                                     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
@@ -4275,9 +4305,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        /* Some third-party PHYs need to be reset on link going
         * down.
         */
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+       if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
+            tg3_asic_rev(tp) == ASIC_REV_5704 ||
+            tg3_asic_rev(tp) == ASIC_REV_5705) &&
            tp->link_up) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -4319,8 +4349,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                                        return err;
                        }
                }
-       } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-                  tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+       } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+                  tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
@@ -4337,8 +4367,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           tg3_asic_rev(tp) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -4442,6 +4472,15 @@ relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);
 
+               if (tg3_flag(tp, ROBOSWITCH)) {
+                       current_link_up = 1;
+                       /* FIXME: when BCM5325 switch is used use 100 MBit/s */
+                       current_speed = SPEED_1000;
+                       current_duplex = DUPLEX_FULL;
+                       tp->link_config.active_speed = current_speed;
+                       tp->link_config.active_duplex = current_duplex;
+               }
+
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
@@ -4460,11 +4499,31 @@ relink:
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 
+       /* In order for the 5750 core in BCM4785 chip to work properly
+        * in RGMII mode, the Led Control Register must be set up.
+        */
+       if (tg3_flag(tp, RGMII_MODE)) {
+               u32 led_ctrl = tr32(MAC_LED_CTRL);
+               led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
+
+               if (tp->link_config.active_speed == SPEED_10)
+                       led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
+               else if (tp->link_config.active_speed == SPEED_100)
+                       led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+                                    LED_CTRL_100MBPS_ON);
+               else if (tp->link_config.active_speed == SPEED_1000)
+                       led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+                                    LED_CTRL_1000MBPS_ON);
+
+               tw32(MAC_LED_CTRL, led_ctrl);
+               udelay(40);
+       }
+
        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -4476,7 +4535,7 @@ relink:
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
-           tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
@@ -4495,7 +4554,7 @@ relink:
        }
        udelay(40);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
@@ -4950,8 +5009,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
        port_a = 1;
        current_link_up = 0;
 
-       if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+       if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;
@@ -5280,7 +5339,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
@@ -5349,8 +5408,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
                        bmcr = new_bmcr;
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-                           ASIC_REV_5714) {
+                       if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
@@ -5485,7 +5543,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
        else
                err = tg3_setup_copper_phy(tp, force_reset);
 
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+       if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
                u32 scale;
 
                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
@@ -5503,8 +5561,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 
        val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
              (6 << TX_LENGTHS_IPG_SHIFT);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762)
                val |= tr32(MAC_TX_LENGTHS) &
                       (TX_LENGTHS_JMB_FRM_LEN_MSK |
                        TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -7128,7 +7186,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
        dma_addr_t new_addr = 0;
        int ret = 0;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+       if (tg3_asic_rev(tp) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                int more_headroom = 4 - ((unsigned long)skb->data & 3);
@@ -7302,7 +7360,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+                        tg3_asic_rev(tp) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
 
@@ -7458,7 +7516,7 @@ static void tg3_mac_loopback(struct tg3 *tp, bool enable)
 
                if (tg3_flag(tp, 5705_PLUS) ||
                    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+                   tg3_asic_rev(tp) == ASIC_REV_5700)
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }
 
@@ -7517,7 +7575,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
        udelay(40);
 
        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+           tg3_asic_rev(tp) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -7541,7 +7599,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
 
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
@@ -8219,7 +8277,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 
        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
@@ -8291,7 +8349,7 @@ static int tg3_chip_reset(struct tg3 *tp)
         */
        tg3_save_pci_state(tp);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);
 
@@ -8326,7 +8384,7 @@ static int tg3_chip_reset(struct tg3 *tp)
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+       if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }
@@ -8336,19 +8394,19 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+               if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
 
-               if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+               if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -8391,7 +8449,7 @@ static int tg3_chip_reset(struct tg3 *tp)
        if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
                u16 val16;
 
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
                        int j;
                        u32 cfg_val;
 
@@ -8432,23 +8490,33 @@ static int tg3_chip_reset(struct tg3 *tp)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }
 
+       if (tg3_flag(tp, IS_SSB_CORE)) {
+               /*
+                * BCM4785: In order to avoid repercussions from using
+                * potentially defective internal ROM, stop the Rx RISC CPU,
+                * which is not required.
+                */
+               tg3_stop_fw(tp);
+               tg3_halt_cpu(tp, RX_CPU_BASE);
+       }
+
        tw32(GRC_MODE, tp->grc_mode);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);
 
                tw32(0xc4, val | (1 << 15));
        }
 
        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+           tg3_asic_rev(tp) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
@@ -8474,15 +8542,15 @@ static int tg3_chip_reset(struct tg3 *tp)
        tg3_mdio_start(tp);
 
        if (tg3_flag(tp, PCI_EXPRESS) &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+           tg3_asic_rev(tp) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);
 
                tw32(0x7c00, val | (1 << 25));
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }
@@ -8694,7 +8762,7 @@ static void tg3_rings_reset(struct tg3 *tp)
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (tg3_flag(tp, 57765_CLASS) ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+                tg3_asic_rev(tp) == ASIC_REV_5762)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8710,8 +8778,8 @@ static void tg3_rings_reset(struct tg3 *tp)
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
+       else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+                tg3_asic_rev(tp) == ASIC_REV_5762 ||
                 tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
@@ -8817,12 +8885,12 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 
        if (!tg3_flag(tp, 5750_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+           tg3_asic_rev(tp) == ASIC_REV_5750 ||
+           tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_flag(tp, 57765_PLUS))
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+       else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+                tg3_asic_rev(tp) == ASIC_REV_5787)
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
        else
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
@@ -9004,7 +9072,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
                val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
                      TG3_CPMU_EEE_LNKIDL_UART_IDL;
-               if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
                        val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
 
                tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
@@ -9017,7 +9085,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                      TG3_CPMU_EEEMD_LPI_IN_RX |
                      TG3_CPMU_EEEMD_EEE_ENABLE;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+               if (tg3_asic_rev(tp) != ASIC_REV_5717)
                        val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
 
                if (tg3_flag(tp, ENABLE_APE))
@@ -9043,7 +9111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
        tg3_write_sig_legacy(tp, RESET_KIND_INIT);
 
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+       if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
                val = tr32(TG3_CPMU_CTRL);
                val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
                tw32(TG3_CPMU_CTRL, val);
@@ -9064,7 +9132,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(TG3_CPMU_HST_ACC, val);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+       if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
                val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
                       PCIE_PWR_MGMT_L1_THRESH_4MS;
@@ -9094,7 +9162,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        }
 
        if (tg3_flag(tp, 57765_CLASS)) {
-               if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
                        u32 grc_mode = tr32(GRC_MODE);
 
                        /* Access the lower 1K of PL PCIE block registers. */
@@ -9109,8 +9177,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                        tw32(GRC_MODE, grc_mode);
                }
 
-               if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
-                       u32 grc_mode = tr32(GRC_MODE);
+               if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
+                       u32 grc_mode;
+
+                       /* Fix transmit hangs */
+                       val = tr32(TG3_CPMU_PADRNG_CTL);
+                       val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+                       tw32(TG3_CPMU_PADRNG_CTL, val);
+
+                       grc_mode = tr32(GRC_MODE);
 
                        /* Access the lower 1K of DL PCIE block registers. */
                        val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
@@ -9142,7 +9217,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE)) {
                val = tr32(TG3PCI_PCISTATE);
                val |= PCISTATE_RETRY_SAME_DMA;
@@ -9160,7 +9235,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(TG3PCI_PCISTATE, val);
        }
 
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+       if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
                /* Enable some hw fixes.  */
                val = tr32(TG3PCI_MSI_DATA);
                val |= (1 << 26) | (1 << 28) | (1 << 29);
@@ -9179,15 +9254,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(TG3PCI_DMA_RW_CTRL) &
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
-               if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
                        val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
                if (!tg3_flag(tp, 57765_CLASS) &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
+                   tg3_asic_rev(tp) != ASIC_REV_5717 &&
+                   tg3_asic_rev(tp) != ASIC_REV_5762)
                        val |= DMA_RWCTRL_TAGGED_STAT_WA;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
-                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+       } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+                  tg3_asic_rev(tp) != ASIC_REV_5761) {
                /* This value is determined during the probe time DMA
                 * engine test, tg3_test_dma.
                 */
@@ -9227,9 +9302,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Initialize MBUF/DESC pool. */
        if (tg3_flag(tp, 5750_PLUS)) {
                /* Do nothing.  */
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+       } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
                tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+               if (tg3_asic_rev(tp) == ASIC_REV_5704)
                        tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
                else
                        tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
@@ -9267,11 +9342,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
             tp->bufmgr_config.dma_high_water);
 
        val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+       if (tg3_asic_rev(tp) == ASIC_REV_5719)
                val |= BUFMGR_MODE_NO_TX_UNDERRUN;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
                val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
        tw32(BUFMGR_MODE, val);
        for (i = 0; i < 2000; i++) {
@@ -9284,7 +9359,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                return -ENODEV;
        }
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
                tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
 
        tg3_setup_rxbd_thresholds(tp);
@@ -9322,7 +9397,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Program the jumbo buffer descriptor ring control
         * blocks on those devices that have them.
         */
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
            (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
 
                if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -9336,7 +9411,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                             val | BDINFO_FLAGS_USE_EXT_RECV);
                        if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
                            tg3_flag(tp, 57765_CLASS) ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+                           tg3_asic_rev(tp) == ASIC_REV_5762)
                                tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
                                     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
                } else {
@@ -9378,8 +9453,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
              (6 << TX_LENGTHS_IPG_SHIFT) |
              (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762)
                val |= tr32(MAC_TX_LENGTHS) &
                       (TX_LENGTHS_JMB_FRM_LEN_MSK |
                        TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -9399,20 +9474,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
                      RDMAC_MODE_LNGREAD_ENAB);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+       if (tg3_asic_rev(tp) == ASIC_REV_5717)
                rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+       if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+           tg3_asic_rev(tp) == ASIC_REV_5785 ||
+           tg3_asic_rev(tp) == ASIC_REV_57780)
                rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
                              RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
                              RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
                if (tg3_flag(tp, TSO_CAPABLE) &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+                   tg3_asic_rev(tp) == ASIC_REV_5705) {
                        rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
                           !tg3_flag(tp, IS_5788)) {
@@ -9423,35 +9498,43 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, PCI_EXPRESS))
                rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+       if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+               tp->dma_limit = 0;
+               if (tp->dev->mtu <= ETH_DATA_LEN) {
+                       rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+                       tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+               }
+       }
+
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
                rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 
        if (tg3_flag(tp, 57765_PLUS) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+           tg3_asic_rev(tp) == ASIC_REV_5785 ||
+           tg3_asic_rev(tp) == ASIC_REV_57780)
                rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762)
                rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
+           tg3_asic_rev(tp) == ASIC_REV_5784 ||
+           tg3_asic_rev(tp) == ASIC_REV_5785 ||
+           tg3_asic_rev(tp) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS)) {
                u32 tgtreg;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+               if (tg3_asic_rev(tp) == ASIC_REV_5762)
                        tgtreg = TG3_RDMA_RSRVCTRL_REG2;
                else
                        tgtreg = TG3_RDMA_RSRVCTRL_REG;
 
                val = tr32(tgtreg);
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5762) {
                        val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
                                 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
                                 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -9462,12 +9545,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 tgtreg;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+               if (tg3_asic_rev(tp) == ASIC_REV_5762)
                        tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
                else
                        tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
@@ -9550,7 +9633,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        if (!tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+           tg3_asic_rev(tp) != ASIC_REV_5700)
                tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
        udelay(40);
@@ -9568,11 +9651,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                            GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
                            GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+               if (tg3_asic_rev(tp) == ASIC_REV_5752)
                        gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
                                     GRC_LCLCTRL_GPIO_OUTPUT3;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+               if (tg3_asic_rev(tp) == ASIC_REV_5755)
                        gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
 
                tp->grc_local_ctrl &= ~gpio_mask;
@@ -9607,11 +9690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
               WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
               WDMAC_MODE_LNGREAD_ENAB);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
                if (tg3_flag(tp, TSO_CAPABLE) &&
-                   (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
-                    tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+                   (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
+                    tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
                        /* nothing */
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
                           !tg3_flag(tp, IS_5788)) {
@@ -9623,7 +9706,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, 5755_PLUS))
                val |= WDMAC_MODE_STATUS_TAG_FIX;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+       if (tg3_asic_rev(tp) == ASIC_REV_5785)
                val |= WDMAC_MODE_BURST_ALL_DATA;
 
        tw32_f(WDMAC_MODE, val);
@@ -9634,10 +9717,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5703) {
                        pcix_cmd &= ~PCI_X_CMD_MAX_READ;
                        pcix_cmd |= PCI_X_CMD_READ_2K;
-               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+               } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
                        pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
                        pcix_cmd |= PCI_X_CMD_READ_2K;
                }
@@ -9648,7 +9731,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(RDMAC_MODE, rdmac_mode);
        udelay(40);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5719) {
                for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
                        if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
                                break;
@@ -9665,7 +9748,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (!tg3_flag(tp, 5705_PLUS))
                tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+       if (tg3_asic_rev(tp) == ASIC_REV_5761)
                tw32(SNDDATAC_MODE,
                     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
        else
@@ -9688,7 +9771,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32(SNDBDI_MODE, val);
        tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
                err = tg3_load_5701_a0_firmware_fix(tp);
                if (err)
                        return err;
@@ -9703,11 +9786,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tp->tx_mode = TX_MODE_ENABLE;
 
        if (tg3_flag(tp, 5755_PLUS) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+           tg3_asic_rev(tp) == ASIC_REV_5906)
                tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762) {
                val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
                tp->tx_mode &= ~val;
                tp->tx_mode |= tr32(MAC_TX_MODE) & val;
@@ -9758,8 +9841,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        udelay(10);
 
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
-               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
-                       !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+               if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
+                   !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
                        /* Set drive transmission level to 1.2V  */
                        /* only if the signal pre-emphasis bit is not set  */
                        val = tr32(MAC_SERDES_CFG);
@@ -9767,7 +9850,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                        val |= 0x880;
                        tw32(MAC_SERDES_CFG, val);
                }
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
                        tw32(MAC_SERDES_CFG, 0x616000);
        }
 
@@ -9780,14 +9863,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                val = 2;
        tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
            (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                /* Use hardware link auto-negotiation */
                tg3_flag_set(tp, HW_AUTONEG);
        }
 
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+           tg3_asic_rev(tp) == ASIC_REV_5714) {
                u32 tmp;
 
                tmp = tr32(SERDES_RX_CTRL);
@@ -10041,9 +10124,9 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
 
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+       if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
                TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        } else {
                u32 val = tr32(HOSTCC_FLOW_ATTN);
@@ -10091,10 +10174,15 @@ static void tg3_timer(unsigned long __opaque)
 
        spin_lock(&tp->lock);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
 
+       if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+               /* BCM4785: Flush posted writes from GbE to host memory. */
+               tr32(HOSTCC_MODE);
+       }
+
        if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
@@ -10212,7 +10300,7 @@ restart_timer:
 static void tg3_timer_init(struct tg3 *tp)
 {
        if (tg3_flag(tp, TAGGED_STATUS) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+           tg3_asic_rev(tp) != ASIC_REV_5717 &&
            !tg3_flag(tp, 57765_CLASS))
                tp->timer_offset = HZ;
        else
@@ -10793,7 +10881,7 @@ static int tg3_open(struct net_device *dev)
 
        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
                        if (err)
                                return err;
                } else if (err) {
@@ -10863,8 +10951,8 @@ static u64 tg3_calc_crc_errors(struct tg3 *tp)
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+           (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+            tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 val;
 
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
@@ -12389,11 +12477,11 @@ static int tg3_test_memory(struct tg3 *tp)
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+                tg3_asic_rev(tp) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+       else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
@@ -12505,7 +12593,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+                        tg3_asic_rev(tp) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
@@ -12691,7 +12779,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+       if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);
 
@@ -12969,7 +13057,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
-               err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+               err = __tg3_readphy(tp, data->phy_id & 0x1f,
+                                   data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);
 
                data->val_out = mii_regval;
@@ -12985,7 +13074,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
-               err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+               err = __tg3_writephy(tp, data->phy_id & 0x1f,
+                                    data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);
 
                return err;
@@ -13176,7 +13266,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+       if (tg3_asic_rev(tp) == ASIC_REV_57766)
                reset_phy = 1;
 
        err = tg3_restart_hw(tp, reset_phy);
@@ -13289,7 +13379,7 @@ static void tg3_get_nvram_info(struct tg3 *tp)
                tw32(NVRAM_CFG1, nvcfg1);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
@@ -13730,7 +13820,7 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
@@ -13791,7 +13881,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
-                       tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+                       if (tg3_asic_rev(tp) != ASIC_REV_5762)
+                               tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
@@ -13837,7 +13928,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
-                       tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+                       if (tg3_asic_rev(tp) != ASIC_REV_5762)
+                               tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
@@ -13850,7 +13942,7 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;
 
                if (tg3_nvram_read(tp, 0, &val))
@@ -13865,6 +13957,14 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
 static void tg3_nvram_init(struct tg3 *tp)
 {
+       if (tg3_flag(tp, IS_SSB_CORE)) {
+               /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
+               tg3_flag_clear(tp, NVRAM);
+               tg3_flag_clear(tp, NVRAM_BUFFERED);
+               tg3_flag_set(tp, NO_NVRAM);
+               return;
+       }
+
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -13877,8 +13977,8 @@ static void tg3_nvram_init(struct tg3 *tp)
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+       if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+           tg3_asic_rev(tp) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);
 
                if (tg3_nvram_lock(tp)) {
@@ -13891,26 +13991,26 @@ static void tg3_nvram_init(struct tg3 *tp)
 
                tp->nvram_size = 0;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+               if (tg3_asic_rev(tp) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+               else if (tg3_asic_rev(tp) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+               else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
+                        tg3_asic_rev(tp) == ASIC_REV_5784 ||
+                        tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+               else if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+               else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+               else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+               else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+                        tg3_asic_rev(tp) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+               else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+                        tg3_asic_rev(tp) == ASIC_REV_5762)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);
@@ -14023,7 +14123,7 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
@@ -14050,13 +14150,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 
                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+               if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+                   tg3_asic_rev(tp) != ASIC_REV_5701 &&
+                   tg3_asic_rev(tp) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+               if (tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
 
                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
@@ -14104,18 +14204,16 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-                           ASIC_REV_5700 ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) ==
-                           ASIC_REV_5701)
+                       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+                           tg3_asic_rev(tp) == ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 
                        break;
 
                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
-                       if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
-                           tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+                       if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+                           tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;
@@ -14126,19 +14224,19 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 
                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
-                       if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+                       if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;
 
                }
 
-               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+               if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
+                    tg3_asic_rev(tp) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
 
-               if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+               if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 
                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
@@ -14182,13 +14280,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
 
                if ((tg3_flag(tp, 57765_PLUS) ||
-                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
-                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+                    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+                     tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
 
                if (tg3_flag(tp, PCI_EXPRESS) &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+                   tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS)) {
                        u32 cfg3;
 
@@ -14391,10 +14489,19 @@ static int tg3_phy_probe(struct tg3 *tp)
                         * subsys device table.
                         */
                        p = tg3_lookup_by_subsys(tp);
-                       if (!p)
+                       if (p) {
+                               tp->phy_id = p->phy_id;
+                       } else if (!tg3_flag(tp, IS_SSB_CORE)) {
+                               /* For now we saw the IDs 0xbc050cd0,
+                                * 0xbc050f80 and 0xbc050c30 on devices
+                                * 0xbc050f80 and 0xbc050c30 on devices
+                                * probably more. Just assume that the phy is
+                                * supported when it is connected to a SSB core
+                                * for now.
+                                */
                                return -ENODEV;
+                       }
 
-                       tp->phy_id = p->phy_id;
                        if (!tp->phy_id ||
                            tp->phy_id == TG3_PHY_ID_BCM8002)
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
@@ -14402,13 +14509,13 @@ static int tg3_phy_probe(struct tg3 *tp)
        }
 
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
-            (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
-             tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
-            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
-             tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+           (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+            tg3_asic_rev(tp) == ASIC_REV_5720 ||
+            tg3_asic_rev(tp) == ASIC_REV_5762 ||
+            (tg3_asic_rev(tp) == ASIC_REV_5717 &&
+             tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
+            (tg3_asic_rev(tp) == ASIC_REV_57765 &&
+             tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 
        tg3_phy_init_link_config(tp);
@@ -14518,7 +14625,7 @@ out_not_found:
                return;
 
 out_no_vpd:
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
                        strcpy(tp->board_part_number, "BCM5717");
@@ -14526,7 +14633,7 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+       } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
@@ -14537,7 +14644,7 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
@@ -14552,7 +14659,7 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+       } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
@@ -14563,7 +14670,7 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
 nomatch:
@@ -14804,7 +14911,7 @@ static void tg3_read_otp_ver(struct tg3 *tp)
 {
        u32 val, val2;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
+       if (tg3_asic_rev(tp) != ASIC_REV_5762)
                return;
 
        if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
@@ -14910,7 +15017,7 @@ static struct pci_dev *tg3_find_peer(struct tg3 *tp)
 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
 {
        tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+       if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
                u32 reg;
 
                /* All devices that use the alternate
@@ -14947,47 +15054,47 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
        /* Wrong chip ID in 5752 A0. This code can be removed later
         * as A0 is not in production.
         */
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
                tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
                tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720)
                tg3_flag_set(tp, 5717_PLUS);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+       if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
+           tg3_asic_rev(tp) == ASIC_REV_57766)
                tg3_flag_set(tp, 57765_CLASS);
 
        if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, 57765_PLUS);
 
        /* Intentionally exclude ASIC_REV_5906 */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+           tg3_asic_rev(tp) == ASIC_REV_5787 ||
+           tg3_asic_rev(tp) == ASIC_REV_5784 ||
+           tg3_asic_rev(tp) == ASIC_REV_5761 ||
+           tg3_asic_rev(tp) == ASIC_REV_5785 ||
+           tg3_asic_rev(tp) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, 5755_PLUS);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+       if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
+           tg3_asic_rev(tp) == ASIC_REV_5714)
                tg3_flag_set(tp, 5780_CLASS);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+           tg3_asic_rev(tp) == ASIC_REV_5752 ||
+           tg3_asic_rev(tp) == ASIC_REV_5906 ||
            tg3_flag(tp, 5755_PLUS) ||
            tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, 5750_PLUS);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
            tg3_flag(tp, 5750_PLUS))
                tg3_flag_set(tp, 5705_PLUS);
 }
@@ -14997,13 +15104,13 @@ static bool tg3_10_100_only_device(struct tg3 *tp,
 {
        u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
 
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
-           (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+       if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
+            (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET))
                return true;
 
        if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5705) {
                        if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
                                return true;
                } else {
@@ -15064,8 +15171,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
         * enable this workaround if the 5703 is on the secondary
         * bus of these ICH bridges.
         */
-       if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
-           (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+       if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
+           (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
                static struct tg3_dev_id {
                        u32     vendor;
                        u32     device;
@@ -15105,7 +15212,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                }
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5701) {
                static struct tg3_dev_id {
                        u32     vendor;
                        u32     device;
@@ -15165,29 +15272,29 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                } while (bridge);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+       if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+           tg3_asic_rev(tp) == ASIC_REV_5714)
                tp->pdev_peer = tg3_find_peer(tp);
 
        /* Determine TSO capabilities */
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
                ; /* Do nothing. HW bug. */
        else if (tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, HW_TSO_3);
        else if (tg3_flag(tp, 5755_PLUS) ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+                tg3_asic_rev(tp) == ASIC_REV_5906)
                tg3_flag_set(tp, HW_TSO_2);
        else if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, HW_TSO_1);
                tg3_flag_set(tp, TSO_BUG);
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
-                   tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+               if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
+                   tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
                        tg3_flag_clear(tp, TSO_BUG);
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
-                  tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+       } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+                  tg3_asic_rev(tp) != ASIC_REV_5701 &&
+                  tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
                        tg3_flag_set(tp, TSO_BUG);
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+               if (tg3_asic_rev(tp) == ASIC_REV_5705)
                        tp->fw_needed = FIRMWARE_TG3TSO5;
                else
                        tp->fw_needed = FIRMWARE_TG3TSO;
@@ -15209,22 +15316,22 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                tp->fw_needed = NULL;
        }
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
                tp->fw_needed = FIRMWARE_TG3;
 
        tp->irq_max = 1;
 
        if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, SUPPORT_MSI);
-               if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
-                   GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
-                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
-                    tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+               if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
+                   tg3_chip_rev(tp) == CHIPREV_5750_BX ||
+                   (tg3_asic_rev(tp) == ASIC_REV_5714 &&
+                    tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
                     tp->pdev_peer == tp->pdev))
                        tg3_flag_clear(tp, SUPPORT_MSI);
 
                if (tg3_flag(tp, 5755_PLUS) ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                   tg3_asic_rev(tp) == ASIC_REV_5906) {
                        tg3_flag_set(tp, 1SHOT_MSI);
                }
 
@@ -15240,26 +15347,26 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                tp->rxq_max = TG3_RSS_MAX_NUM_QS;
                tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+               if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5720)
                        tp->txq_max = tp->irq_max - 1;
        }
 
        if (tg3_flag(tp, 5755_PLUS) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+           tg3_asic_rev(tp) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+       if (tg3_asic_rev(tp) == ASIC_REV_5719)
                tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, LRG_PROD_RING_CAP);
 
        if (tg3_flag(tp, 57765_PLUS) &&
-           tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+           tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
                tg3_flag_set(tp, USE_JUMBO_BDFLAG);
 
        if (!tg3_flag(tp, 5705_PLUS) ||
@@ -15277,20 +15384,19 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 
                pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
                if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-                           ASIC_REV_5906) {
+                       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                                tg3_flag_clear(tp, HW_TSO_2);
                                tg3_flag_clear(tp, TSO_CAPABLE);
                        }
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
-                           tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
-                           tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+                       if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+                           tg3_asic_rev(tp) == ASIC_REV_5761 ||
+                           tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
+                           tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
                                tg3_flag_set(tp, CLKREQ_BUG);
-               } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+               } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
                        tg3_flag_set(tp, L1PLLPD_EN);
                }
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+       } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
                /* BCM5785 devices are effectively PCIe devices, and should
                 * follow PCIe codepaths, but do not have a PCIe capabilities
                 * section.
@@ -15323,7 +15429,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                             &tp->pci_cacheline_sz);
        pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                             &tp->pci_lat_timer);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
            tp->pci_lat_timer < 64) {
                tp->pci_lat_timer = 64;
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
@@ -15333,7 +15439,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
        /* Important! -- It is critical that the PCI-X hw workaround
         * situation is decided before the first MMIO register access.
         */
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+       if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
                /* 5700 BX chips need to have their TX producer index
                 * mailboxes written twice to workaround a bug.
                 */
@@ -15375,7 +15481,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                tg3_flag_set(tp, PCI_32BIT);
 
        /* Chip-specific fixup from Broadcom driver */
-       if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+       if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
            (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
                pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
@@ -15392,9 +15498,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
        /* Various workaround register access methods */
        if (tg3_flag(tp, PCIX_TARGET_HWBUG))
                tp->write32 = tg3_write_indirect_reg32;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+       else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
                 (tg3_flag(tp, PCI_EXPRESS) &&
-                 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+                 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
                /*
                 * Back to back register writes can cause problems on these
                 * chips, the workaround is to read back all reg writes
@@ -15426,7 +15532,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                pci_cmd &= ~PCI_COMMAND_MEMORY;
                pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
        }
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tp->read32_mbox = tg3_read32_mbox_5906;
                tp->write32_mbox = tg3_write32_mbox_5906;
                tp->write32_tx_mbox = tg3_write32_mbox_5906;
@@ -15435,8 +15541,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 
        if (tp->write32 == tg3_write_indirect_reg32 ||
            (tg3_flag(tp, PCIX_MODE) &&
-            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+            (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+             tg3_asic_rev(tp) == ASIC_REV_5701)))
                tg3_flag_set(tp, SRAM_USE_CONFIG);
 
        /* The memory arbiter has to be enabled in order for SRAM accesses
@@ -15448,7 +15554,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 
        tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tg3_flag(tp, PCIX_MODE)) {
                        pci_read_config_dword(tp->pdev,
@@ -15456,20 +15562,25 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                                              &val);
                        tp->pci_fn = val & 0x7;
                }
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+       } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+                  tg3_asic_rev(tp) == ASIC_REV_5719 ||
+                  tg3_asic_rev(tp) == ASIC_REV_5720) {
                tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
                if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
                        val = tr32(TG3_CPMU_STATUS);
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+               if (tg3_asic_rev(tp) == ASIC_REV_5717)
                        tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
                else
                        tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
                                     TG3_CPMU_STATUS_FSHFT_5719;
        }
 
+       if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+               tp->write32_tx_mbox = tg3_write_flush_reg32;
+               tp->write32_rx_mbox = tg3_write_flush_reg32;
+       }
+
        /* Get eeprom hw config before calling tg3_set_power_state().
         * In particular, the TG3_FLAG_IS_NIC flag must be
         * determined before calling tg3_set_power_state() so that
@@ -15505,18 +15616,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
         * It is also used as eeprom write protect on LOMs.
         */
        tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_flag(tp, EEPROM_WRITE_PROT))
                tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                       GRC_LCLCTRL_GPIO_OUTPUT1);
        /* Unused GPIO3 must be driven as output on 5752 because there
         * are no pull-up resistors on unused GPIO pins.
         */
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+       else if (tg3_asic_rev(tp) == ASIC_REV_5752)
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+           tg3_asic_rev(tp) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 
@@ -15530,7 +15641,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                                              GRC_LCLCTRL_GPIO_OUTPUT0;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5762)
                tp->grc_local_ctrl |=
                        tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
 
@@ -15544,42 +15655,42 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                tg3_flag_set(tp, JUMBO_RING_ENABLE);
 
        /* Determine WakeOnLan speed to use. */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
                tg3_flag_clear(tp, WOL_SPEED_100MB);
        } else {
                tg3_flag_set(tp, WOL_SPEED_100MB);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+       if (tg3_asic_rev(tp) == ASIC_REV_5906)
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
 
        /* A few boards don't want Ethernet@WireSpeed phy feature */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-            (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
-            (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+            (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
+            (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
 
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
-           GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+       if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
+           tg3_chip_rev(tp) == CHIPREV_5704_AX)
                tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
                tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
 
        if (tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+           tg3_asic_rev(tp) != ASIC_REV_5785 &&
+           tg3_asic_rev(tp) != ASIC_REV_57780 &&
            !tg3_flag(tp, 57765_PLUS)) {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5787 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5784 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5761) {
                        if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
                            tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
                                tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
@@ -15589,8 +15700,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                        tp->phy_flags |= TG3_PHYFLG_BER_BUG;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
-           GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+           tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                tp->phy_otp = tg3_read_otp_phycfg(tp);
                if (tp->phy_otp == 0)
                        tp->phy_otp = TG3_OTP_DEFAULT;
@@ -15602,20 +15713,20 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                tp->mi_mode = MAC_MI_MODE_BASE;
 
        tp->coalesce_mode = 0;
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
-           GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+       if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
+           tg3_chip_rev(tp) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
 
        /* Set these bits to enable statistics workaround. */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+           tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
                tp->coalesce_mode |= HOSTCC_MODE_ATTN;
                tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+       if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
+           tg3_asic_rev(tp) == ASIC_REV_57780)
                tg3_flag_set(tp, USE_PHYLIB);
 
        err = tg3_mdio_init(tp);
@@ -15624,8 +15735,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 
        /* Initialize data/descriptor byte/word swapping. */
        val = tr32(GRC_MODE);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762)
                val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
                        GRC_MODE_WORD_SWAP_B2HRX_DATA |
                        GRC_MODE_B2HRX_ENABLE |
@@ -15645,12 +15756,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                              &pci_state_reg);
        if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
            !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
-               u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
-
-               if (chiprevid == CHIPREV_ID_5701_A0 ||
-                   chiprevid == CHIPREV_ID_5701_B0 ||
-                   chiprevid == CHIPREV_ID_5701_B2 ||
-                   chiprevid == CHIPREV_ID_5701_B5) {
+               if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+                   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+                   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
+                   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
                        void __iomem *sram_base;
 
                        /* Write some dummy words into the SRAM status block
@@ -15673,13 +15782,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
        grc_misc_cfg = tr32(GRC_MISC_CFG);
        grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
            (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                tg3_flag_set(tp, IS_5788);
 
        if (!tg3_flag(tp, IS_5788) &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+           tg3_asic_rev(tp) != ASIC_REV_5700)
                tg3_flag_set(tp, TAGGED_STATUS);
        if (tg3_flag(tp, TAGGED_STATUS)) {
                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
@@ -15712,7 +15821,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        } else {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+               if (tg3_asic_rev(tp) == ASIC_REV_5700)
                        tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
@@ -15722,7 +15831,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
         * change bit implementation, so we must use the
         * status register in those cases.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+       if (tg3_asic_rev(tp) == ASIC_REV_5700)
                tg3_flag_set(tp, USE_LINKCHG_REG);
        else
                tg3_flag_clear(tp, USE_LINKCHG_REG);
@@ -15732,7 +15841,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
         * upon subsystem IDs.
         */
        if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+           tg3_asic_rev(tp) == ASIC_REV_5701 &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                tg3_flag_set(tp, USE_LINKCHG_REG);
@@ -15746,7 +15855,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 
        tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+       if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
            tg3_flag(tp, PCIX_MODE)) {
                tp->rx_offset = NET_SKB_PAD;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -15763,9 +15872,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
        /* Increment the rx prod index on the rx std ring by at most
         * 8 for these chips to workaround hw errata.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+       if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+           tg3_asic_rev(tp) == ASIC_REV_5752 ||
+           tg3_asic_rev(tp) == ASIC_REV_5755)
                tp->rx_std_max_post = 8;
 
        if (tg3_flag(tp, ASPM_WORKAROUND))
@@ -15806,14 +15915,21 @@ static int tg3_get_device_address(struct tg3 *tp)
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
+       int err;
 
 #ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
 #endif
 
+       if (tg3_flag(tp, IS_SSB_CORE)) {
+               err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
+               if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+                       return 0;
+       }
+
        mac_offset = 0x7c;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+       if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
@@ -15826,7 +15942,7 @@ static int tg3_get_device_address(struct tg3 *tp)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+       } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mac_offset = 0x10;
 
        /* First try to get it from MAC address mailbox. */
@@ -15894,8 +16010,8 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
        /* On 5703 and later chips, the boundary bits have no
         * effect.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+       if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+           tg3_asic_rev(tp) != ASIC_REV_5701 &&
            !tg3_flag(tp, PCI_EXPRESS))
                goto out;
 
@@ -16133,14 +16249,14 @@ static int tg3_test_dma(struct tg3 *tp)
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+               if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;
 
@@ -16149,35 +16265,37 @@ static int tg3_test_dma(struct tg3 *tp)
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+                           tg3_asic_rev(tp) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+                       if (tg3_asic_rev(tp) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
-               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+               } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
-               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+               } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }
+       if (tg3_flag(tp, ONE_DMA_AT_ONCE))
+               tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+       if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+           tg3_asic_rev(tp) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+           tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
 
@@ -16201,8 +16319,8 @@ static int tg3_test_dma(struct tg3 *tp)
        tg3_switch_clocks(tp);
 #endif
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+       if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+           tg3_asic_rev(tp) != ASIC_REV_5701)
                goto out;
 
        /* It is best to perform DMA test with maximum write burst size
@@ -16321,7 +16439,7 @@ static void tg3_init_bufmgr_config(struct tg3 *tp)
                        DEFAULT_MB_MACRX_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_5705;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                        tp->bufmgr_config.mbuf_mac_rx_low_water =
                                DEFAULT_MB_MACRX_LOW_WATER_5906;
                        tp->bufmgr_config.mbuf_high_water =
@@ -16516,6 +16634,18 @@ static int tg3_init_one(struct pci_dev *pdev,
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;
 
+       if (pdev_is_ssb_gige_core(pdev)) {
+               tg3_flag_set(tp, IS_SSB_CORE);
+               if (ssb_gige_must_flush_posted_writes(pdev))
+                       tg3_flag_set(tp, FLUSH_POSTED_WRITES);
+               if (ssb_gige_one_dma_at_once(pdev))
+                       tg3_flag_set(tp, ONE_DMA_AT_ONCE);
+               if (ssb_gige_have_roboswitch(pdev))
+                       tg3_flag_set(tp, ROBOSWITCH);
+               if (ssb_gige_is_rgmii(pdev))
+                       tg3_flag_set(tp, RGMII_MODE);
+       }
+
        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
@@ -16631,7 +16761,7 @@ static int tg3_init_one(struct pci_dev *pdev,
        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
-       if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+       if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
 
                if (tg3_flag(tp, 5755_PLUS))
@@ -16651,11 +16781,11 @@ static int tg3_init_one(struct pci_dev *pdev,
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
-                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
-                    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+                   tg3_asic_rev(tp) == ASIC_REV_5761 ||
+                   (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+                    tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
+                   tg3_asic_rev(tp) == ASIC_REV_5785 ||
+                   tg3_asic_rev(tp) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }
 
@@ -16667,14 +16797,14 @@ static int tg3_init_one(struct pci_dev *pdev,
         * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
         * loopback for the remaining devices.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+       if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;
 
        dev->hw_features |= features;
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
@@ -16753,9 +16883,9 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, dev);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+       if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720 ||
+           tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);
 
        if (tg3_flag(tp, 5717_PLUS)) {
@@ -16765,6 +16895,8 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        tg3_timer_init(tp);
 
+       tg3_carrier_off(tp);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -16773,7 +16905,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
-                   tp->pci_chip_rev_id,
+                   tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);
 
index 9cd88a4..8d7d4c2 100644 (file)
 #define  MISC_HOST_CTRL_TAGGED_STATUS   0x00000200
 #define  MISC_HOST_CTRL_CHIPREV                 0xffff0000
 #define  MISC_HOST_CTRL_CHIPREV_SHIFT   16
-#define  GET_CHIP_REV_ID(MISC_HOST_CTRL) \
-        (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
-         MISC_HOST_CTRL_CHIPREV_SHIFT)
+
 #define  CHIPREV_ID_5700_A0             0x7000
 #define  CHIPREV_ID_5700_A1             0x7001
 #define  CHIPREV_ID_5700_B0             0x7100
 #define  CHIPREV_ID_5719_A0             0x05719000
 #define  CHIPREV_ID_5720_A0             0x05720000
 #define  CHIPREV_ID_5762_A0             0x05762000
-#define  GET_ASIC_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 12)
+
 #define   ASIC_REV_5700                         0x07
 #define   ASIC_REV_5701                         0x00
 #define   ASIC_REV_5703                         0x01
 #define   ASIC_REV_5720                         0x5720
 #define   ASIC_REV_57766                0x57766
 #define   ASIC_REV_5762                         0x5762
-#define  GET_CHIP_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX               0x70
 #define   CHIPREV_5700_BX               0x71
 #define   CHIPREV_5700_CX               0x72
 #define   CHIPREV_5784_AX               0x57840
 #define   CHIPREV_5761_AX               0x57610
 #define   CHIPREV_57765_AX              0x577650
-#define  GET_METAL_REV(CHIP_REV_ID)    ((CHIP_REV_ID) & 0xff)
 #define   METAL_REV_A0                  0x00
 #define   METAL_REV_A1                  0x01
 #define   METAL_REV_B0                  0x00
 #define  CPMU_MUTEX_GNT_DRIVER          0x00001000
 #define TG3_CPMU_PHY_STRAP             0x00003664
 #define TG3_CPMU_PHY_STRAP_IS_SERDES    0x00000020
+#define TG3_CPMU_PADRNG_CTL            0x00003668
+#define  TG3_CPMU_PADRNG_CTL_RDIV2      0x00040000
 /* 0x3664 --> 0x36b0 unused */
 
 #define TG3_CPMU_EEE_MODE              0x000036b0
@@ -3056,6 +3054,11 @@ enum TG3_FLAGS {
        TG3_FLAG_57765_PLUS,
        TG3_FLAG_57765_CLASS,
        TG3_FLAG_5717_PLUS,
+       TG3_FLAG_IS_SSB_CORE,
+       TG3_FLAG_FLUSH_POSTED_WRITES,
+       TG3_FLAG_ROBOSWITCH,
+       TG3_FLAG_ONE_DMA_AT_ONCE,
+       TG3_FLAG_RGMII_MODE,
 
        /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
        TG3_FLAG_NUMBER_OF_FLAGS,       /* Last entry in enum TG3_FLAGS */
@@ -3352,4 +3355,18 @@ struct tg3 {
        bool                            link_up;
 };
 
+/* Accessor macros for chip and asic attributes
+ *
+ * nb: Using static inlines equivalent to the accessor macros generates
+ *     larger object code with gcc 4.7.
+ *     Using statement expression macros to check tp with
+ *     typecheck(struct tg3 *, tp) also creates larger objects.
+ */
+#define tg3_chip_rev_id(tp)                                    \
+       ((tp)->pci_chip_rev_id)
+#define tg3_asic_rev(tp)                                       \
+       ((tp)->pci_chip_rev_id >> 12)
+#define tg3_chip_rev(tp)                                       \
+       ((tp)->pci_chip_rev_id >> 8)
+
 #endif /* !(_T3_H) */
index 352190b..7903943 100644 (file)
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
                 * get notified when new packets arrive.
                 */
                macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+               /* Packets received while interrupts were disabled */
+               status = macb_readl(bp, RSR);
+               if (unlikely(status))
+                       napi_reschedule(napi);
        }
 
        /* TODO: Handle errors */
index 92170d5..9488032 100644 (file)
@@ -1477,8 +1477,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxq->rspq.idx);
 
-       if (pkt->vlan_ex)
+       if (pkt->vlan_ex) {
                __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+               rxq->stats.vlan_ex++;
+       }
        ret = napi_gro_frags(&rxq->rspq.napi);
 
        if (ret == GRO_HELD)
@@ -1501,7 +1503,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
                       const struct pkt_gl *gl)
 {
        struct sk_buff *skb;
-       const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+       const struct cpl_rx_pkt *pkt = (void *)rsp;
        bool csum_ok = pkt->csum_calc && !pkt->err_vec;
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
index 4010cb7..28ceb84 100644 (file)
 
 #define DRV_VER                        "4.6.62.0u"
 #define DRV_NAME               "be2net"
-#define BE_NAME                        "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME               "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME                        "Emulex BladeEngine2"
+#define BE3_NAME               "Emulex BladeEngine3"
+#define OC_NAME                        "Emulex OneConnect"
 #define OC_NAME_BE             OC_NAME "(be3)"
 #define OC_NAME_LANCER         OC_NAME "(Lancer)"
 #define OC_NAME_SH             OC_NAME "(Skyhawk)"
-#define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC               "Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
 #define EMULEX_VENDOR_ID       0x10df
index 8b04880..071aea7 100644 (file)
@@ -93,13 +93,16 @@ static void be_mcc_notify(struct be_adapter *adapter)
  * little endian) */
 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
 {
+       u32 flags;
+
        if (compl->flags != 0) {
-               compl->flags = le32_to_cpu(compl->flags);
-               BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
-               return true;
-       } else {
-               return false;
+               flags = le32_to_cpu(compl->flags);
+               if (flags & CQE_FLAGS_VALID_MASK) {
+                       compl->flags = flags;
+                       return true;
+               }
        }
+       return false;
 }
 
 /* Need to reset the entire word that houses the valid bit */
index 7d53481..3860888 100644 (file)
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
index 51215e1..0fe68c4 100644 (file)
@@ -1609,7 +1609,6 @@ static int fec_enet_init(struct net_device *ndev)
        }
 
        spin_lock_init(&fep->hw_lock);
-       spin_lock_init(&fep->tmreg_lock);
 
        fep->netdev = ndev;
 
@@ -1841,6 +1840,9 @@ fec_probe(struct platform_device *pdev)
 
        fec_reset_phy(pdev);
 
+       if (fep->bufdesc_ex)
+               fec_ptp_init(ndev, pdev);
+
        ret = fec_enet_init(ndev);
        if (ret)
                goto failed_init;
@@ -1856,9 +1858,6 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_register;
 
-       if (fep->bufdesc_ex)
-               fec_ptp_init(ndev, pdev);
-
        return 0;
 
 failed_register:
index 817d081..7f91b0c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/delay.h>
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
+#include <linux/of_net.h>
 #include <linux/of_platform.h>
 
 #include <linux/netdevice.h>
@@ -76,10 +77,6 @@ static void mpc52xx_fec_stop(struct net_device *dev);
 static void mpc52xx_fec_start(struct net_device *dev);
 static void mpc52xx_fec_reset(struct net_device *dev);
 
-static u8 mpc52xx_fec_mac_addr[6];
-module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
-MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
-
 #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
 static int debug = -1; /* the above default */
@@ -110,15 +107,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
        out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
 }
 
-static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
-{
-       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-       struct mpc52xx_fec __iomem *fec = priv->fec;
-
-       *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
-       *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
-}
-
 static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
 {
        struct sockaddr *sock = addr;
@@ -853,6 +841,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
        struct resource mem;
        const u32 *prop;
        int prop_size;
+       struct device_node *np = op->dev.of_node;
+       const char *mac_addr;
 
        phys_addr_t rx_fifo;
        phys_addr_t tx_fifo;
@@ -866,7 +856,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
        priv->ndev = ndev;
 
        /* Reserve FEC control zone */
-       rv = of_address_to_resource(op->dev.of_node, 0, &mem);
+       rv = of_address_to_resource(np, 0, &mem);
        if (rv) {
                printk(KERN_ERR DRIVER_NAME ": "
                                "Error while parsing device node resource\n" );
@@ -919,7 +909,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
 
        /* Get the IRQ we need one by one */
                /* Control */
-       ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+       ndev->irq = irq_of_parse_and_map(np, 0);
 
                /* RX */
        priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -927,11 +917,33 @@ static int mpc52xx_fec_probe(struct platform_device *op)
                /* TX */
        priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
 
-       /* MAC address init */
-       if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
-               memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
-       else
-               mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+       /*
+        * MAC address init:
+        *
+        * First try to read MAC address from DT
+        */
+       mac_addr = of_get_mac_address(np);
+       if (mac_addr) {
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+       } else {
+               struct mpc52xx_fec __iomem *fec = priv->fec;
+
+               /*
+                * If the MAC addresse is not provided via DT then read
+                * it back from the controller regs
+                */
+               *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
+               *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+       }
+
+       /*
+        * Check if the MAC address is valid, if not get a random one
+        */
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
+               eth_hw_addr_random(ndev);
+               dev_warn(&ndev->dev, "using random MAC address %pM\n",
+                        ndev->dev_addr);
+       }
 
        priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
 
@@ -942,20 +954,20 @@ static int mpc52xx_fec_probe(struct platform_device *op)
        /* Start with safe defaults for link connection */
        priv->speed = 100;
        priv->duplex = DUPLEX_HALF;
-       priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
+       priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
 
        /* The current speed preconfigures the speed of the MII link */
-       prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
+       prop = of_get_property(np, "current-speed", &prop_size);
        if (prop && (prop_size >= sizeof(u32) * 2)) {
                priv->speed = prop[0];
                priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
        }
 
        /* If there is a phy handle, then get the PHY node */
-       priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+       priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 
        /* the 7-wire property means don't use MII mode */
-       if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
+       if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
                priv->seven_wire_mode = 1;
                dev_info(&ndev->dev, "using 7-wire PHY mode\n");
        }
@@ -970,6 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
 
        /* We're done ! */
        dev_set_drvdata(&op->dev, ndev);
+       printk(KERN_INFO "%s: %s MAC %pM\n",
+              ndev->name, op->dev.of_node->full_name, ndev->dev_addr);
 
        return 0;
 
index c40526c..1f17ca0 100644 (file)
@@ -104,7 +104,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
        unsigned long flags;
        int inc;
 
-       inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+       inc = 1000000000 / fep->cycle_speed;
 
        /* grab the ptp lock */
        spin_lock_irqsave(&fep->tmreg_lock, flags);
@@ -363,6 +363,8 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
        fep->ptp_caps.settime = fec_ptp_settime;
        fep->ptp_caps.enable = fec_ptp_enable;
 
+       fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+
        spin_lock_init(&fep->tmreg_lock);
 
        fec_ptp_start_cyclecounter(ndev);
index 19c54a0..4b5e8a6 100644 (file)
@@ -133,8 +133,8 @@ static void gfar_netpoll(struct net_device *dev);
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                             int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+                              int amount_pull, struct napi_struct *napi);
 void gfar_halt(struct net_device *dev);
 static void gfar_halt_nodisable(struct net_device *dev);
 void gfar_start(struct net_device *dev);
@@ -231,7 +231,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
        dma_addr_t addr;
        int i, j, k;
        struct gfar_private *priv = netdev_priv(ndev);
-       struct device *dev = &priv->ofdev->dev;
+       struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
 
@@ -277,14 +277,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
-               tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
-                                             tx_queue->tx_ring_size,
-                                             GFP_KERNEL);
-               if (!tx_queue->tx_skbuff) {
-                       netif_err(priv, ifup, ndev,
-                                 "Could not allocate tx_skbuff\n");
+               tx_queue->tx_skbuff =
+                       kmalloc_array(tx_queue->tx_ring_size,
+                                     sizeof(*tx_queue->tx_skbuff),
+                                     GFP_KERNEL);
+               if (!tx_queue->tx_skbuff)
                        goto cleanup;
-               }
 
                for (k = 0; k < tx_queue->tx_ring_size; k++)
                        tx_queue->tx_skbuff[k] = NULL;
@@ -292,15 +290,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
-                                             rx_queue->rx_ring_size,
-                                             GFP_KERNEL);
-
-               if (!rx_queue->rx_skbuff) {
-                       netif_err(priv, ifup, ndev,
-                                 "Could not allocate rx_skbuff\n");
+               rx_queue->rx_skbuff =
+                       kmalloc_array(rx_queue->rx_ring_size,
+                                     sizeof(*rx_queue->rx_skbuff),
+                                     GFP_KERNEL);
+               if (!rx_queue->rx_skbuff)
                        goto cleanup;
-               }
 
                for (j = 0; j < rx_queue->rx_ring_size; j++)
                        rx_queue->rx_skbuff[j] = NULL;
@@ -349,6 +344,9 @@ static void gfar_init_mac(struct net_device *ndev)
        /* Configure the coalescing support */
        gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
+       /* set this when rx hw offload (TOE) functions are being used */
+       priv->uses_rxfcb = 0;
+
        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
                /* Program the RIR0 reg with the required distribution */
@@ -359,8 +357,10 @@ static void gfar_init_mac(struct net_device *ndev)
        if (ndev->flags & IFF_PROMISC)
                rctrl |= RCTRL_PROM;
 
-       if (ndev->features & NETIF_F_RXCSUM)
+       if (ndev->features & NETIF_F_RXCSUM) {
                rctrl |= RCTRL_CHECKSUMMING;
+               priv->uses_rxfcb = 1;
+       }
 
        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;
@@ -382,11 +382,15 @@ static void gfar_init_mac(struct net_device *ndev)
        }
 
        /* Enable HW time stamping if requested from user space */
-       if (priv->hwts_rx_en)
+       if (priv->hwts_rx_en) {
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+               priv->uses_rxfcb = 1;
+       }
 
-       if (ndev->features & NETIF_F_HW_VLAN_RX)
+       if (ndev->features & NETIF_F_HW_VLAN_RX) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+               priv->uses_rxfcb = 1;
+       }
 
        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);
@@ -505,20 +509,6 @@ void unlock_tx_qs(struct gfar_private *priv)
                spin_unlock(&priv->tx_queue[i]->txlock);
 }
 
-static bool gfar_is_vlan_on(struct gfar_private *priv)
-{
-       return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
-              (priv->ndev->features & NETIF_F_HW_VLAN_TX);
-}
-
-/* Returns 1 if incoming frames use an FCB */
-static inline int gfar_uses_fcb(struct gfar_private *priv)
-{
-       return gfar_is_vlan_on(priv) ||
-              (priv->ndev->features & NETIF_F_RXCSUM) ||
-              (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
-}
-
 static void free_tx_pointers(struct gfar_private *priv)
 {
        int i;
@@ -580,19 +570,11 @@ static int gfar_parse_group(struct device_node *np,
        u32 *queue_mask;
        int i;
 
-       if (priv->mode == MQ_MG_MODE) {
-               for (i = 0; i < GFAR_NUM_IRQS; i++) {
-                       grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
-                                                 GFP_KERNEL);
-                       if (!grp->irqinfo[i])
-                               return -ENOMEM;
-               }
-       } else {
-               grp->irqinfo[GFAR_TX] = kzalloc(sizeof(struct gfar_irqinfo),
-                                               GFP_KERNEL);
-               if (!grp->irqinfo[GFAR_TX])
+       for (i = 0; i < GFAR_NUM_IRQS; i++) {
+               grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+                                         GFP_KERNEL);
+               if (!grp->irqinfo[i])
                        return -ENOMEM;
-               grp->irqinfo[GFAR_RX] = grp->irqinfo[GFAR_ER] = NULL;
        }
 
        grp->regs = of_iomap(np, 0);
@@ -676,7 +658,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                return -ENOMEM;
 
        priv = netdev_priv(dev);
-       priv->node = ofdev->dev.of_node;
        priv->ndev = dev;
 
        priv->num_tx_queues = num_tx_qs;
@@ -1014,7 +995,7 @@ static int gfar_probe(struct platform_device *ofdev)
        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
-       priv->node = ofdev->dev.of_node;
+       priv->dev = &ofdev->dev;
        SET_NETDEV_DEV(dev, &ofdev->dev);
 
        spin_lock_init(&priv->bflock);
@@ -1051,8 +1032,6 @@ static int gfar_probe(struct platform_device *ofdev)
        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) regs;
 
-       SET_NETDEV_DEV(dev, &ofdev->dev);
-
        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->mtu = 1500;
@@ -1730,13 +1709,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
                if (!tx_queue->tx_skbuff[i])
                        continue;
 
-               dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+               dma_unmap_single(priv->dev, txbdp->bufPtr,
                                 txbdp->length, DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
                     j++) {
                        txbdp++;
-                       dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+                       dma_unmap_page(priv->dev, txbdp->bufPtr,
                                       txbdp->length, DMA_TO_DEVICE);
                }
                txbdp++;
@@ -1757,8 +1736,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 
        for (i = 0; i < rx_queue->rx_ring_size; i++) {
                if (rx_queue->rx_skbuff[i]) {
-                       dma_unmap_single(&priv->ofdev->dev,
-                                        rxbdp->bufPtr, priv->rx_buffer_size,
+                       dma_unmap_single(priv->dev, rxbdp->bufPtr,
+                                        priv->rx_buffer_size,
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
                        rx_queue->rx_skbuff[i] = NULL;
@@ -1797,7 +1776,7 @@ static void free_skb_resources(struct gfar_private *priv)
                        free_skb_rx_queue(rx_queue);
        }
 
-       dma_free_coherent(&priv->ofdev->dev,
+       dma_free_coherent(priv->dev,
                          sizeof(struct txbd8) * priv->total_tx_ring_size +
                          sizeof(struct rxbd8) * priv->total_rx_ring_size,
                          priv->tx_queue[0]->tx_bd_base,
@@ -2177,7 +2156,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        if (i == nr_frags - 1)
                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 
-                       bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+                       bufaddr = skb_frag_dma_map(priv->dev,
                                                   &skb_shinfo(skb)->frags[i],
                                                   0,
                                                   length,
@@ -2229,7 +2208,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                lstatus |= BD_LFLAG(TXBD_TOE);
        }
 
-       txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+       txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
                                             skb_headlen(skb), DMA_TO_DEVICE);
 
        /* If time stamping is requested one additional TxBD must be set up. The
@@ -2342,10 +2321,13 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
 
        tempval = gfar_read(&regs->rctrl);
        /* If parse is no longer required, then disable parser */
-       if (tempval & RCTRL_REQ_PARSER)
+       if (tempval & RCTRL_REQ_PARSER) {
                tempval |= RCTRL_PRSDEP_INIT;
-       else
+               priv->uses_rxfcb = 1;
+       } else {
                tempval &= ~RCTRL_PRSDEP_INIT;
+               priv->uses_rxfcb = 0;
+       }
        gfar_write(&regs->rctrl, tempval);
 }
 
@@ -2378,6 +2360,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
                tempval = gfar_read(&regs->rctrl);
                tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
                gfar_write(&regs->rctrl, tempval);
+               priv->uses_rxfcb = 1;
        } else {
                /* Disable VLAN tag extraction */
                tempval = gfar_read(&regs->rctrl);
@@ -2401,15 +2384,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;
 
-       if (gfar_is_vlan_on(priv))
-               frame_size += VLAN_HLEN;
-
        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                netif_err(priv, drv, dev, "Invalid MTU setting\n");
                return -EINVAL;
        }
 
-       if (gfar_uses_fcb(priv))
+       if (priv->uses_rxfcb)
                frame_size += GMAC_FCB_LEN;
 
        frame_size += priv->padding;
@@ -2542,7 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                } else
                        buflen = bdp->length;
 
-               dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+               dma_unmap_single(priv->dev, bdp->bufPtr,
                                 buflen, DMA_TO_DEVICE);
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2561,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                bdp = next_txbd(bdp, base, tx_ring_size);
 
                for (i = 0; i < frags; i++) {
-                       dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+                       dma_unmap_page(priv->dev, bdp->bufPtr,
                                       bdp->length, DMA_TO_DEVICE);
                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                        bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2627,7 +2607,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
        struct gfar_private *priv = netdev_priv(dev);
        dma_addr_t buf;
 
-       buf = dma_map_single(&priv->ofdev->dev, skb->data,
+       buf = dma_map_single(priv->dev, skb->data,
                             priv->rx_buffer_size, DMA_FROM_DEVICE);
        gfar_init_rxbdp(rx_queue, bdp, buf);
 }
@@ -2661,7 +2641,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;
 
-               estats->rx_trunc++;
+               atomic64_inc(&estats->rx_trunc);
 
                return;
        }
@@ -2670,20 +2650,20 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
                stats->rx_length_errors++;
 
                if (status & RXBD_LARGE)
-                       estats->rx_large++;
+                       atomic64_inc(&estats->rx_large);
                else
-                       estats->rx_short++;
+                       atomic64_inc(&estats->rx_short);
        }
        if (status & RXBD_NONOCTET) {
                stats->rx_frame_errors++;
-               estats->rx_nonoctet++;
+               atomic64_inc(&estats->rx_nonoctet);
        }
        if (status & RXBD_CRCERR) {
-               estats->rx_crcerr++;
+               atomic64_inc(&estats->rx_crcerr);
                stats->rx_crc_errors++;
        }
        if (status & RXBD_OVERRUN) {
-               estats->rx_overrun++;
+               atomic64_inc(&estats->rx_overrun);
                stats->rx_crc_errors++;
        }
 }
@@ -2708,8 +2688,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                             int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+                              int amount_pull, struct napi_struct *napi)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;
@@ -2756,10 +2736,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        /* Send the packet up the stack */
        ret = napi_gro_receive(napi, skb);
 
-       if (GRO_DROP == ret)
-               priv->extra_stats.kernel_dropped++;
-
-       return 0;
+       if (unlikely(GRO_DROP == ret))
+               atomic64_inc(&priv->extra_stats.kernel_dropped);
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2780,7 +2758,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
        bdp = rx_queue->cur_rx;
        base = rx_queue->rx_bd_base;
 
-       amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
+       amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
 
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                struct sk_buff *newskb;
@@ -2792,7 +2770,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
-               dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+               dma_unmap_single(priv->dev, bdp->bufPtr,
                                 priv->rx_buffer_size, DMA_FROM_DEVICE);
 
                if (unlikely(!(bdp->status & RXBD_ERR) &&
@@ -2825,7 +2803,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                        } else {
                                netif_warn(priv, rx_err, dev, "Missing skb!\n");
                                rx_queue->stats.rx_dropped++;
-                               priv->extra_stats.rx_skbmissing++;
+                               atomic64_inc(&priv->extra_stats.rx_skbmissing);
                        }
 
                }
@@ -3258,7 +3236,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
                        netif_dbg(priv, tx_err, dev,
                                  "TX FIFO underrun, packet dropped\n");
                        dev->stats.tx_dropped++;
-                       priv->extra_stats.tx_underrun++;
+                       atomic64_inc(&priv->extra_stats.tx_underrun);
 
                        local_irq_save(flags);
                        lock_tx_qs(priv);
@@ -3273,7 +3251,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
        }
        if (events & IEVENT_BSY) {
                dev->stats.rx_errors++;
-               priv->extra_stats.rx_bsy++;
+               atomic64_inc(&priv->extra_stats.rx_bsy);
 
                gfar_receive(irq, grp_id);
 
@@ -3282,19 +3260,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
        }
        if (events & IEVENT_BABR) {
                dev->stats.rx_errors++;
-               priv->extra_stats.rx_babr++;
+               atomic64_inc(&priv->extra_stats.rx_babr);
 
                netif_dbg(priv, rx_err, dev, "babbling RX error\n");
        }
        if (events & IEVENT_EBERR) {
-               priv->extra_stats.eberr++;
+               atomic64_inc(&priv->extra_stats.eberr);
                netif_dbg(priv, rx_err, dev, "bus error\n");
        }
        if (events & IEVENT_RXC)
                netif_dbg(priv, rx_status, dev, "control frame\n");
 
        if (events & IEVENT_BABT) {
-               priv->extra_stats.tx_babt++;
+               atomic64_inc(&priv->extra_stats.tx_babt);
                netif_dbg(priv, tx_err, dev, "babbling TX error\n");
        }
        return IRQ_HANDLED;
index 71793f4..63a28d2 100644 (file)
@@ -627,34 +627,29 @@ struct rmon_mib
 };
 
 struct gfar_extra_stats {
-       u64 kernel_dropped;
-       u64 rx_large;
-       u64 rx_short;
-       u64 rx_nonoctet;
-       u64 rx_crcerr;
-       u64 rx_overrun;
-       u64 rx_bsy;
-       u64 rx_babr;
-       u64 rx_trunc;
-       u64 eberr;
-       u64 tx_babt;
-       u64 tx_underrun;
-       u64 rx_skbmissing;
-       u64 tx_timeout;
+       atomic64_t kernel_dropped;
+       atomic64_t rx_large;
+       atomic64_t rx_short;
+       atomic64_t rx_nonoctet;
+       atomic64_t rx_crcerr;
+       atomic64_t rx_overrun;
+       atomic64_t rx_bsy;
+       atomic64_t rx_babr;
+       atomic64_t rx_trunc;
+       atomic64_t eberr;
+       atomic64_t tx_babt;
+       atomic64_t tx_underrun;
+       atomic64_t rx_skbmissing;
+       atomic64_t tx_timeout;
 };
 
 #define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
-#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+#define GFAR_EXTRA_STATS_LEN \
+       (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
 
-/* Number of stats in the stats structure (ignore car and cam regs)*/
+/* Number of stats exported via ethtool */
 #define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
 
-struct gfar_stats {
-       u64 extra[GFAR_EXTRA_STATS_LEN];
-       u64 rmon[GFAR_RMON_LEN];
-};
-
-
 struct gfar {
        u32     tsec_id;        /* 0x.000 - Controller ID register */
        u32     tsec_id2;       /* 0x.004 - Controller ID2 register */
@@ -1054,28 +1049,65 @@ enum gfar_errata {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-
-       /* Indicates how many tx, rx queues are enabled */
-       unsigned int num_tx_queues;
        unsigned int num_rx_queues;
-       unsigned int num_grps;
-       unsigned int mode;
-
-       /* The total tx and rx ring size for the enabled queues */
-       unsigned int total_tx_ring_size;
-       unsigned int total_rx_ring_size;
 
-       struct device_node *node;
+       struct device *dev;
        struct net_device *ndev;
-       struct platform_device *ofdev;
        enum gfar_errata errata;
+       unsigned int rx_buffer_size;
+
+       u16 uses_rxfcb;
+       u16 padding;
+
+       /* HW time stamping enabled flag */
+       int hwts_rx_en;
+       int hwts_tx_en;
 
-       struct gfar_priv_grp gfargrp[MAXGROUPS];
        struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
        struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+       struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+       u32 device_flags;
+
+       unsigned int mode;
+       unsigned int num_tx_queues;
+       unsigned int num_grps;
+
+       /* Network Statistics */
+       struct gfar_extra_stats extra_stats;
+
+       /* PHY stuff */
+       phy_interface_t interface;
+       struct device_node *phy_node;
+       struct device_node *tbi_node;
+       struct phy_device *phydev;
+       struct mii_bus *mii_bus;
+       int oldspeed;
+       int oldduplex;
+       int oldlink;
+
+       /* Bitfield update lock */
+       spinlock_t bflock;
+
+       uint32_t msg_enable;
+
+       struct work_struct reset_task;
+
+       struct platform_device *ofdev;
+       unsigned char
+               extended_hash:1,
+               bd_stash_en:1,
+               rx_filer_enable:1,
+               /* Wake-on-LAN enabled */
+               wol_en:1,
+               /* Enable priority based Tx scheduling in Hw */
+               prio_sched_en:1;
+
+       /* The total tx and rx ring size for the enabled queues */
+       unsigned int total_tx_ring_size;
+       unsigned int total_rx_ring_size;
 
        /* RX per device parameters */
-       unsigned int rx_buffer_size;
        unsigned int rx_stash_size;
        unsigned int rx_stash_index;
 
@@ -1094,39 +1126,6 @@ struct gfar_private {
        unsigned int fifo_starve;
        unsigned int fifo_starve_off;
 
-       /* Bitfield update lock */
-       spinlock_t bflock;
-
-       phy_interface_t interface;
-       struct device_node *phy_node;
-       struct device_node *tbi_node;
-       u32 device_flags;
-       unsigned char
-               extended_hash:1,
-               bd_stash_en:1,
-               rx_filer_enable:1,
-               wol_en:1, /* Wake-on-LAN enabled */
-               prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
-       unsigned short padding;
-
-       /* PHY stuff */
-       struct phy_device *phydev;
-       struct mii_bus *mii_bus;
-       int oldspeed;
-       int oldduplex;
-       int oldlink;
-
-       uint32_t msg_enable;
-
-       struct work_struct reset_task;
-
-       /* Network Statistics */
-       struct gfar_extra_stats extra_stats;
-
-       /* HW time stamping enabled flag */
-       int hwts_rx_en;
-       int hwts_tx_en;
-
        /*Filer table*/
        unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
        unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
index 45e59d5..75e89ac 100644 (file)
@@ -149,20 +149,17 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       u64 *extra = (u64 *) & priv->extra_stats;
+       atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+       for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+               buf[i] = atomic64_read(&extra[i]);
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
-               struct gfar_stats *stats = (struct gfar_stats *) buf;
-
-               for (i = 0; i < GFAR_RMON_LEN; i++)
-                       stats->rmon[i] = (u64) gfar_read(&rmon[i]);
 
-               for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
-                       stats->extra[i] = extra[i];
-       } else
-               for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
-                       buf[i] = extra[i];
+               for (; i < GFAR_STATS_LEN; i++, rmon++)
+                       buf[i] = (u64) gfar_read(rmon);
+       }
 }
 
 static int gfar_sset_count(struct net_device *dev, int sset)
index 91977d9..328f47c 100644 (file)
@@ -76,16 +76,16 @@ MODULE_PARM_DESC(msg_level, "msg_level");
 MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no.  Default = 0 ");
 MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
-                "[2^x - 1], x = [6..14]. Default = "
+                "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
 MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
-                "[2^x - 1], x = [6..14]. Default = "
+                "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
 MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
-                "[2^x - 1], x = [6..14]. Default = "
+                "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
 MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
-                "[2^x - 1], x = [6..14]. Default = "
+                "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
 MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");
index 2b6cd02..26d9cd5 100644 (file)
@@ -81,68 +81,69 @@ struct e1000_adapter;
 
 #include "e1000_hw.h"
 
-#define E1000_MAX_INTR 10
+#define E1000_MAX_INTR                 10
 
 /* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD                  256
-#define E1000_MAX_TXD                      256
-#define E1000_MIN_TXD                       48
-#define E1000_MAX_82544_TXD               4096
+#define E1000_DEFAULT_TXD              256
+#define E1000_MAX_TXD                  256
+#define E1000_MIN_TXD                  48
+#define E1000_MAX_82544_TXD            4096
 
-#define E1000_DEFAULT_RXD                  256
-#define E1000_MAX_RXD                      256
-#define E1000_MIN_RXD                       48
-#define E1000_MAX_82544_RXD               4096
+#define E1000_DEFAULT_RXD              256
+#define E1000_MAX_RXD                  256
+#define E1000_MIN_RXD                  48
+#define E1000_MAX_82544_RXD            4096
 
 #define E1000_MIN_ITR_USECS            10 /* 100000 irq/sec */
 #define E1000_MAX_ITR_USECS            10000 /* 100    irq/sec */
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE     1522
 
 /* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128   128    /* Used for packet split */
-#define E1000_RXBUFFER_256   256    /* Used for packet split */
-#define E1000_RXBUFFER_512   512
-#define E1000_RXBUFFER_1024  1024
-#define E1000_RXBUFFER_2048  2048
-#define E1000_RXBUFFER_4096  4096
-#define E1000_RXBUFFER_8192  8192
-#define E1000_RXBUFFER_16384 16384
+#define E1000_RXBUFFER_128             128    /* Used for packet split */
+#define E1000_RXBUFFER_256             256    /* Used for packet split */
+#define E1000_RXBUFFER_512             512
+#define E1000_RXBUFFER_1024            1024
+#define E1000_RXBUFFER_2048            2048
+#define E1000_RXBUFFER_4096            4096
+#define E1000_RXBUFFER_8192            8192
+#define E1000_RXBUFFER_16384           16384
 
 /* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX       15
+#define E1000_SMARTSPEED_DOWNSHIFT     3
+#define E1000_SMARTSPEED_MAX           15
 
 /* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
+#define E1000_PBA_BYTES_SHIFT          0xA
+#define E1000_TX_HEAD_ADDR_SHIFT       7
+#define E1000_PBA_TX_MASK              0xFFFF0000
 
 /* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638  /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640   /* Low:  5696 bytes below Rx FIFO size */
+#define E1000_FC_HIGH_DIFF     0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF      0x1640 /* Low:  5696 bytes below Rx FIFO size */
 
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+#define E1000_FC_PAUSE_TIME    0xFFFF /* pause for the max or until send xon */
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define E1000_TX_QUEUE_WAKE    16
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE  16      /* Must be power of 2 */
+#define E1000_RX_BUFFER_WRITE  16 /* Must be power of 2 */
 
-#define AUTO_ALL_MODES            0
-#define E1000_EEPROM_82544_APM    0x0004
-#define E1000_EEPROM_APME         0x0400
+#define AUTO_ALL_MODES         0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME      0x0400
 
 #ifndef E1000_MASTER_SLAVE
 /* Switch to override PHY master/slave setting */
 #define E1000_MASTER_SLAVE     e1000_ms_hw_default
 #endif
 
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE    (-1)
 
 /* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
 struct e1000_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
index 14e3051..43462d5 100644 (file)
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
        if (hw->media_type == e1000_media_type_copper) {
 
                ecmd->supported = (SUPPORTED_10baseT_Half |
-                                  SUPPORTED_10baseT_Full |
-                                  SUPPORTED_100baseT_Half |
-                                  SUPPORTED_100baseT_Full |
-                                  SUPPORTED_1000baseT_Full|
-                                  SUPPORTED_Autoneg |
-                                  SUPPORTED_TP);
+                                  SUPPORTED_10baseT_Full |
+                                  SUPPORTED_100baseT_Half |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_1000baseT_Full|
+                                  SUPPORTED_Autoneg |
+                                  SUPPORTED_TP);
                ecmd->advertising = ADVERTISED_TP;
 
                if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
                ethtool_cmd_speed_set(ecmd, adapter->link_speed);
 
                /* unfortunately FULL_DUPLEX != DUPLEX_FULL
-                *          and HALF_DUPLEX != DUPLEX_HALF */
-
+                * and HALF_DUPLEX != DUPLEX_HALF
+                */
                if (adapter->link_duplex == FULL_DUPLEX)
                        ecmd->duplex = DUPLEX_FULL;
                else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
        if ((hw->media_type == e1000_media_type_copper) &&
            netif_carrier_ok(netdev))
                ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
-                                                       ETH_TP_MDI_X :
-                                                       ETH_TP_MDI);
+                                    ETH_TP_MDI_X : ETH_TP_MDI);
        else
                ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       /*
-        * MDI setting is only allowed when autoneg enabled because
+       /* MDI setting is only allowed when autoneg enabled because
         * some hardware doesn't allow MDI setting when speed or
         * duplex is forced.
         */
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
                                     ADVERTISED_Autoneg;
                else
                        hw->autoneg_advertised = ecmd->advertising |
-                                                ADVERTISED_TP |
-                                                ADVERTISED_Autoneg;
+                                                ADVERTISED_TP |
+                                                ADVERTISED_Autoneg;
                ecmd->advertising = hw->autoneg_advertised;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       /*
-        * If the link is not reported up to netdev, interrupts are disabled,
+       /* If the link is not reported up to netdev, interrupts are disabled,
         * and so the physical link state may have changed since we last
         * looked. Set get_link_status to make sure that the true link
         * state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
                le16_to_cpus(&eeprom_buff[i]);
 
        memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
-                       eeprom->len);
+              eeprom->len);
        kfree(eeprom_buff);
 
        return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
        ptr = (void *)eeprom_buff;
 
        if (eeprom->offset & 1) {
-               /* need read/modify/write of first changed EEPROM word */
-               /* only the second byte of the word is being modified */
+               /* need read/modify/write of first changed EEPROM word
+                * only the second byte of the word is being modified
+                */
                ret_val = e1000_read_eeprom(hw, first_word, 1,
                                            &eeprom_buff[0]);
                ptr++;
        }
        if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
-               /* need read/modify/write of last changed EEPROM word */
-               /* only the first byte of the word is being modified */
+               /* need read/modify/write of last changed EEPROM word
+                * only the first byte of the word is being modified
+                */
                ret_val = e1000_read_eeprom(hw, last_word, 1,
                                  &eeprom_buff[last_word - first_word]);
        }
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
        rx_old = adapter->rx_ring;
 
        err = -ENOMEM;
-       txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+       txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+                      GFP_KERNEL);
        if (!txdr)
                goto err_alloc_tx;
 
-       rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+       rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+                      GFP_KERNEL);
        if (!rxdr)
                goto err_alloc_rx;
 
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
 
        rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
        rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
-               E1000_MAX_RXD : E1000_MAX_82544_RXD));
+                         E1000_MAX_RXD : E1000_MAX_82544_RXD));
        rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
        txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
        txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
-               E1000_MAX_TXD : E1000_MAX_82544_TXD));
+                         E1000_MAX_TXD : E1000_MAX_82544_TXD));
        txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
                        goto err_setup_tx;
 
                /* save the new, restore the old in order to free it,
-                * then restore the new back again */
+                * then restore the new back again
+                */
 
                adapter->rx_ring = rx_old;
                adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
        if (hw->mac_type >= e1000_82543) {
-
                REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
                REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
                REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
                        REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
                                         0xFFFFFFFF);
                }
-
        } else {
-
                REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
                REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
                REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
                REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
        }
 
        value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 
        *data = 0;
 
-       /* NOTE: we don't test MSI interrupts here, yet */
-       /* Hook up test interrupt handler just for this test */
+       /* NOTE: we don't test MSI interrupts here, yet
+        * Hook up test interrupt handler just for this test
+        */
        if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
-                        netdev))
+                        netdev))
                shared_int = false;
        else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
-                netdev->name, netdev)) {
+                            netdev->name, netdev)) {
                *data = 1;
                return -1;
        }
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
        ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
                        E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
                        E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
-                       E1000_CTRL_FD);  /* Force Duplex to FULL */
+                       E1000_CTRL_FD); /* Force Duplex to FULL */
 
        if (hw->media_type == e1000_media_type_copper &&
           hw->phy_type == e1000_phy_m88)
                ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
        else {
                /* Set the ILOS bit on the fiber Nic is half
-                * duplex link is detected. */
+                * duplex link is detected.
+                */
                stat_reg = er32(STATUS);
                if ((stat_reg & E1000_STATUS_FD) == 0)
                        ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 
                        ret_val = e1000_check_lbtest_frame(
                                        rxdr->buffer_info[l].skb,
-                                       1024);
+                                       1024);
                        if (!ret_val)
                                good_cnt++;
                        if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
                hw->serdes_has_link = false;
 
                /* On some blade server designs, link establishment
-                * could take as long as 2-3 minutes */
+                * could take as long as 2-3 minutes
+                */
                do {
                        e1000_check_for_link(hw);
                        if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
                e_info(hw, "offline testing starting\n");
 
                /* Link test performed before hardware reset so autoneg doesn't
-                * interfere with test result */
+                * interfere with test result
+                */
                if (e1000_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
        default:
                /* dual port cards only support WoL on port A from now on
                 * unless it was enabled in the eeprom for port B
-                * so exclude FUNC_1 ports from having WoL enabled */
+                * so exclude FUNC_1 ports from having WoL enabled
+                */
                if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
                    !adapter->eeprom_wol) {
                        wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
        wol->wolopts = 0;
 
        /* this function will set ->supported = 0 and return 1 if wol is not
-        * supported by this hardware */
+        * supported by this hardware
+        */
        if (e1000_wol_exclusion(adapter, wol) ||
            !device_can_wakeup(&adapter->pdev->dev))
                return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
                data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
-/*     BUG_ON(i != E1000_STATS_LEN); */
+/* BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
-/*             BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+               /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
                break;
        }
 }
 
 static const struct ethtool_ops e1000_ethtool_ops = {
-       .get_settings           = e1000_get_settings,
-       .set_settings           = e1000_set_settings,
-       .get_drvinfo            = e1000_get_drvinfo,
-       .get_regs_len           = e1000_get_regs_len,
-       .get_regs               = e1000_get_regs,
-       .get_wol                = e1000_get_wol,
-       .set_wol                = e1000_set_wol,
-       .get_msglevel           = e1000_get_msglevel,
-       .set_msglevel           = e1000_set_msglevel,
-       .nway_reset             = e1000_nway_reset,
-       .get_link               = e1000_get_link,
-       .get_eeprom_len         = e1000_get_eeprom_len,
-       .get_eeprom             = e1000_get_eeprom,
-       .set_eeprom             = e1000_set_eeprom,
-       .get_ringparam          = e1000_get_ringparam,
-       .set_ringparam          = e1000_set_ringparam,
-       .get_pauseparam         = e1000_get_pauseparam,
-       .set_pauseparam         = e1000_set_pauseparam,
-       .self_test              = e1000_diag_test,
-       .get_strings            = e1000_get_strings,
-       .set_phys_id            = e1000_set_phys_id,
-       .get_ethtool_stats      = e1000_get_ethtool_stats,
-       .get_sset_count         = e1000_get_sset_count,
-       .get_coalesce           = e1000_get_coalesce,
-       .set_coalesce           = e1000_set_coalesce,
+       .get_settings           = e1000_get_settings,
+       .set_settings           = e1000_set_settings,
+       .get_drvinfo            = e1000_get_drvinfo,
+       .get_regs_len           = e1000_get_regs_len,
+       .get_regs               = e1000_get_regs,
+       .get_wol                = e1000_get_wol,
+       .set_wol                = e1000_set_wol,
+       .get_msglevel           = e1000_get_msglevel,
+       .set_msglevel           = e1000_set_msglevel,
+       .nway_reset             = e1000_nway_reset,
+       .get_link               = e1000_get_link,
+       .get_eeprom_len         = e1000_get_eeprom_len,
+       .get_eeprom             = e1000_get_eeprom,
+       .set_eeprom             = e1000_set_eeprom,
+       .get_ringparam          = e1000_get_ringparam,
+       .set_ringparam          = e1000_set_ringparam,
+       .get_pauseparam         = e1000_get_pauseparam,
+       .set_pauseparam         = e1000_set_pauseparam,
+       .self_test              = e1000_diag_test,
+       .get_strings            = e1000_get_strings,
+       .set_phys_id            = e1000_set_phys_id,
+       .get_ethtool_stats      = e1000_get_ethtool_stats,
+       .get_sset_count         = e1000_get_sset_count,
+       .get_coalesce           = e1000_get_coalesce,
+       .set_coalesce           = e1000_set_coalesce,
        .get_ts_info            = ethtool_op_get_ts_info,
 };
 
index 8fedd24..2879b96 100644 (file)
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
        if (hw->phy_init_script) {
                msleep(20);
 
-               /* Save off the current value of register 0x2F5B to be restored at
-                * the end of this routine. */
+               /* Save off the current value of register 0x2F5B to be restored
+                * at the end of this routine.
+                */
                ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
                /* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
        case e1000_82541:
        case e1000_82541_rev_2:
                /* These controllers can't ack the 64-bit write when issuing the
-                * reset, so use IO-mapping as a workaround to issue the reset */
+                * reset, so use IO-mapping as a workaround to issue the reset
+                */
                E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
                break;
        case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
                break;
        }
 
-       /* After MAC reset, force reload of EEPROM to restore power-on settings to
-        * device.  Later controllers reload the EEPROM automatically, so just wait
-        * for reload to complete.
+       /* After MAC reset, force reload of EEPROM to restore power-on settings
+        * to device.  Later controllers reload the EEPROM automatically, so
+        * just wait for reload to complete.
         */
        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
                msleep(5);
        }
 
-       /* Setup the receive address. This involves initializing all of the Receive
-        * Address Registers (RARs 0 - 15).
+       /* Setup the receive address. This involves initializing all of the
+        * Receive Address Registers (RARs 0 - 15).
         */
        e1000_init_rx_addrs(hw);
 
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
        for (i = 0; i < mta_size; i++) {
                E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
                /* use write flush to prevent Memory Write Block (MWB) from
-                * occurring when accessing our register space */
+                * occurring when accessing our register space
+                */
                E1000_WRITE_FLUSH();
        }
 
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
        case e1000_82546_rev_3:
                break;
        default:
-               /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+               /* Workaround for PCI-X problem when BIOS sets MMRBC
+                * incorrectly.
+                */
                if (hw->bus_type == e1000_bus_type_pcix
                    && e1000_pcix_get_mmrbc(hw) > 2048)
                        e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
            hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
                ctrl_ext = er32(CTRL_EXT);
                /* Relaxed ordering must be disabled to avoid a parity
-                * error crash in a PCI slot. */
+                * error crash in a PCI slot.
+                */
                ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
                ew32(CTRL_EXT, ctrl_ext);
        }
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
                ew32(FCRTL, 0);
                ew32(FCRTH, 0);
        } else {
-               /* We need to set up the Receive Threshold high and low water marks
-                * as well as (optionally) enabling the transmission of XON frames.
+               /* We need to set up the Receive Threshold high and low water
+                * marks as well as (optionally) enabling the transmission of
+                * XON frames.
                 */
                if (hw->fc_send_xon) {
                        ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
        e1000_config_collision_dist(hw);
 
        /* Check for a software override of the flow control settings, and setup
-        * the device accordingly.  If auto-negotiation is enabled, then software
-        * will have to set the "PAUSE" bits to the correct value in the Tranmsit
-        * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
-        * auto-negotiation is disabled, then software will have to manually
-        * configure the two flow control enable bits in the CTRL register.
+        * the device accordingly.  If auto-negotiation is enabled, then
+        * software will have to set the "PAUSE" bits to the correct value in
+        * the Transmit Config Word Register (TXCW) and re-start
+        * auto-negotiation.  However, if auto-negotiation is disabled, then
+        * software will have to manually configure the two flow control enable
+        * bits in the CTRL register.
         *
         * The possible values of the "fc" parameter are:
-        *      0:  Flow control is completely disabled
-        *      1:  Rx flow control is enabled (we can receive pause frames, but
-        *          not send pause frames).
-        *      2:  Tx flow control is enabled (we can send pause frames but we do
-        *          not support receiving pause frames).
-        *      3:  Both Rx and TX flow control (symmetric) are enabled.
+        *  0:  Flow control is completely disabled
+        *  1:  Rx flow control is enabled (we can receive pause frames, but
+        *      not send pause frames).
+        *  2:  Tx flow control is enabled (we can send pause frames but we do
+        *      not support receiving pause frames).
+        *  3:  Both Rx and TX flow control (symmetric) are enabled.
         */
        switch (hw->fc) {
        case E1000_FC_NONE:
-               /* Flow control is completely disabled by a software over-ride. */
+               /* Flow ctrl is completely disabled by a software over-ride */
                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
                break;
        case E1000_FC_RX_PAUSE:
-               /* RX Flow control is enabled and TX Flow control is disabled by a
-                * software over-ride. Since there really isn't a way to advertise
-                * that we are capable of RX Pause ONLY, we will advertise that we
-                * support both symmetric and asymmetric RX PAUSE. Later, we will
-                *  disable the adapter's ability to send PAUSE frames.
+               /* Rx Flow control is enabled and Tx Flow control is disabled by
+                * a software over-ride. Since there really isn't a way to
+                * advertise that we are capable of Rx Pause ONLY, we will
+                * advertise that we support both symmetric and asymmetric Rx
+                * PAUSE. Later, we will disable the adapter's ability to send
+                * PAUSE frames.
                 */
                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
                break;
        case E1000_FC_TX_PAUSE:
-               /* TX Flow control is enabled, and RX Flow control is disabled, by a
-                * software over-ride.
+               /* Tx Flow control is enabled, and Rx Flow control is disabled,
+                * by a software over-ride.
                 */
                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
                break;
        case E1000_FC_FULL:
-               /* Flow control (both RX and TX) is enabled by a software over-ride. */
+               /* Flow control (both Rx and Tx) is enabled by a software
+                * over-ride.
+                */
                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
                break;
        default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
                break;
        }
 
-       /* Since auto-negotiation is enabled, take the link out of reset (the link
-        * will be in reset, because we previously reset the chip). This will
-        * restart auto-negotiation.  If auto-negotiation is successful then the
-        * link-up status bit will be set and the flow control enable bits (RFCE
-        * and TFCE) will be set according to their negotiated value.
+       /* Since auto-negotiation is enabled, take the link out of reset (the
+        * link will be in reset, because we previously reset the chip). This
+        * will restart auto-negotiation.  If auto-negotiation is successful
+        * then the link-up status bit will be set and the flow control enable
+        * bits (RFCE and TFCE) will be set according to their negotiated value.
         */
        e_dbg("Auto-negotiation enabled\n");
 
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
        hw->txcw = txcw;
        msleep(1);
 
-       /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
-        * indication in the Device Status Register.  Time-out if a link isn't
-        * seen in 500 milliseconds seconds (Auto-negotiation should complete in
-        * less than 500 milliseconds even if the other end is doing it in SW).
-        * For internal serdes, we just assume a signal is present, then poll.
+       /* If we have a signal (the cable is plugged in) then poll for a
+        * "Link-Up" indication in the Device Status Register.  Time-out if a
+        * link isn't seen in 500 milliseconds (Auto-negotiation should
+        * complete in less than 500 milliseconds even if the other end is doing
+        * it in SW). For internal serdes, we just assume a signal is present,
+        * then poll.
         */
        if (hw->media_type == e1000_media_type_internal_serdes ||
            (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
                        e_dbg("Never got a valid link from auto-neg!!!\n");
                        hw->autoneg_failed = 1;
                        /* AutoNeg failed to achieve a link, so we'll call
-                        * e1000_check_for_link. This routine will force the link up if
-                        * we detect a signal. This will allow us to communicate with
-                        * non-autonegotiating link partners.
+                        * e1000_check_for_link. This routine will force the
+                        * link up if we detect a signal. This will allow us to
+                        * communicate with non-autonegotiating link partners.
                         */
                        ret_val = e1000_check_for_link(hw);
                        if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
        e_dbg("e1000_copper_link_preconfig");
 
        ctrl = er32(CTRL);
-       /* With 82543, we need to force speed and duplex on the MAC equal to what
-        * the PHY speed and duplex configuration is. In addition, we need to
-        * perform a hardware reset on the PHY to take it out of reset.
+       /* With 82543, we need to force speed and duplex on the MAC equal to
+        * what the PHY speed and duplex configuration is. In addition, we need
+        * to perform a hardware reset on the PHY to take it out of reset.
         */
        if (hw->mac_type > e1000_82543) {
                ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
 
                /* when autonegotiation advertisement is only 1000Mbps then we
                 * should disable SmartSpeed and enable Auto MasterSlave
-                * resolution as hardware default. */
+                * resolution as hardware default.
+                */
                if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
                        /* Disable SmartSpeed */
                        ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
 
        if (hw->autoneg) {
                /* Setup autoneg and flow control advertisement
-                * and perform autonegotiation */
+                * and perform autonegotiation
+                */
                ret_val = e1000_copper_link_autoneg(hw);
                if (ret_val)
                        return ret_val;
        } else {
                /* PHY will be set to 10H, 10F, 100H,or 100F
-                * depending on value from forced_speed_duplex. */
+                * depending on value from forced_speed_duplex.
+                */
                e_dbg("Forcing speed and duplex\n");
                ret_val = e1000_phy_force_speed_duplex(hw);
                if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
         * setup the PHY advertisement registers accordingly.  If
         * auto-negotiation is enabled, then software will have to set the
         * "PAUSE" bits to the correct value in the Auto-Negotiation
-        * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+        * Advertisement Register (PHY_AUTONEG_ADV) and re-start
+        * auto-negotiation.
         *
         * The possible values of the "fc" parameter are:
         *      0:  Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
                 * capable of RX Pause ONLY, we will advertise that we
                 * support both symmetric and asymmetric RX PAUSE.  Later
                 * (in e1000_config_fc_after_link_up) we will disable the
-                *hw's ability to send PAUSE frames.
+                * hw's ability to send PAUSE frames.
                 */
                mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
                break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
        /* Are we forcing Full or Half Duplex? */
        if (hw->forced_speed_duplex == e1000_100_full ||
            hw->forced_speed_duplex == e1000_10_full) {
-               /* We want to force full duplex so we SET the full duplex bits in the
-                * Device and MII Control Registers.
+               /* We want to force full duplex so we SET the full duplex bits
+                * in the Device and MII Control Registers.
                 */
                ctrl |= E1000_CTRL_FD;
                mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
                e_dbg("Full Duplex\n");
        } else {
-               /* We want to force half duplex so we CLEAR the full duplex bits in
-                * the Device and MII Control Registers.
+               /* We want to force half duplex so we CLEAR the full duplex bits
+                * in the Device and MII Control Registers.
                 */
                ctrl &= ~E1000_CTRL_FD;
                mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
                if (ret_val)
                        return ret_val;
 
-               /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
-                * forced whenever speed are duplex are forced.
+               /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
+                * MDI forced whenever speed and duplex are forced.
                 */
                phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
                ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
                e_dbg("Waiting for forced speed/duplex link.\n");
                mii_status_reg = 0;
 
-               /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+               /* Wait for autoneg to complete or 4.5 seconds to expire */
                for (i = PHY_FORCE_TIME; i > 0; i--) {
-                       /* Read the MII Status Register and wait for Auto-Neg Complete bit
-                        * to be set.
+                       /* Read the MII Status Register and wait for Auto-Neg
+                        * Complete bit to be set.
                         */
                        ret_val =
                            e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
                        msleep(100);
                }
                if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
-                       /* We didn't get link.  Reset the DSP and wait again for link. */
+                       /* We didn't get link.  Reset the DSP and wait again
+                        * for link.
+                        */
                        ret_val = e1000_phy_reset_dsp(hw);
                        if (ret_val) {
                                e_dbg("Error Resetting PHY DSP\n");
                                return ret_val;
                        }
                }
-               /* This loop will early-out if the link condition has been met.  */
+               /* This loop will early-out if the link condition has been
+                * met.
+                */
                for (i = PHY_FORCE_TIME; i > 0; i--) {
                        if (mii_status_reg & MII_SR_LINK_STATUS)
                                break;
                        msleep(100);
-                       /* Read the MII Status Register and wait for Auto-Neg Complete bit
-                        * to be set.
+                       /* Read the MII Status Register and wait for Auto-Neg
+                        * Complete bit to be set.
                         */
                        ret_val =
                            e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
        }
 
        if (hw->phy_type == e1000_phy_m88) {
-               /* Because we reset the PHY above, we need to re-force TX_CLK in the
-                * Extended PHY Specific Control Register to 25MHz clock.  This value
-                * defaults back to a 2.5MHz clock when the PHY is reset.
+               /* Because we reset the PHY above, we need to re-force TX_CLK in
+                * the Extended PHY Specific Control Register to 25MHz clock.
+                * This value defaults back to a 2.5MHz clock when the PHY is
+                * reset.
                 */
                ret_val =
                    e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
                if (ret_val)
                        return ret_val;
 
-               /* In addition, because of the s/w reset above, we need to enable CRS on
-                * TX.  This must be set for both full and half duplex operation.
+               /* In addition, because of the s/w reset above, we need to
+                * enable CRS on Tx.  This must be set for both full and half
+                * duplex operation.
                 */
                ret_val =
                    e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
        e_dbg("e1000_config_mac_to_phy");
 
        /* 82544 or newer MAC, Auto Speed Detection takes care of
-        * MAC speed/duplex configuration.*/
+        * MAC speed/duplex configuration.
+        */
        if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
                return E1000_SUCCESS;
 
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
                 * registers depending on negotiated values.
                 */
                ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
-                                            &phy_data);
+                                            &phy_data);
                if (ret_val)
                        return ret_val;
 
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
                if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
                        ctrl |= E1000_CTRL_SPD_1000;
                else if ((phy_data & M88E1000_PSSR_SPEED) ==
-                        M88E1000_PSSR_100MBS)
+                        M88E1000_PSSR_100MBS)
                        ctrl |= E1000_CTRL_SPD_100;
        }
 
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
                        /* The AutoNeg process has completed, so we now need to
                         * read both the Auto Negotiation Advertisement Register
-                        * (Address 4) and the Auto_Negotiation Base Page Ability
-                        * Register (Address 5) to determine how flow control was
-                        * negotiated.
+                        * (Address 4) and the Auto_Negotiation Base Page
+                        * Ability Register (Address 5) to determine how flow
+                        * control was negotiated.
                         */
                        ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
                                                     &mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                        if (ret_val)
                                return ret_val;
 
-                       /* Two bits in the Auto Negotiation Advertisement Register
-                        * (Address 4) and two bits in the Auto Negotiation Base
-                        * Page Ability Register (Address 5) determine flow control
-                        * for both the PHY and the link partner.  The following
-                        * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
-                        * 1999, describes these PAUSE resolution bits and how flow
-                        * control is determined based upon these settings.
+                       /* Two bits in the Auto Negotiation Advertisement
+                        * Register (Address 4) and two bits in the Auto
+                        * Negotiation Base Page Ability Register (Address 5)
+                        * determine flow control for both the PHY and the link
+                        * partner.  The following table, taken out of the IEEE
+                        * 802.3ab/D6.0 dated March 25, 1999, describes these
+                        * PAUSE resolution bits and how flow control is
+                        * determined based upon these settings.
                         * NOTE:  DC = Don't Care
                         *
                         *   LOCAL DEVICE  |   LINK PARTNER
                         * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
-                        *-------|---------|-------|---------|--------------------
+                        *-------|---------|-------|---------|------------------
                         *   0   |    0    |  DC   |   DC    | E1000_FC_NONE
                         *   0   |    1    |   0   |   DC    | E1000_FC_NONE
                         *   0   |    1    |   1   |    0    | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                         *
                         *   LOCAL DEVICE  |   LINK PARTNER
                         * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-                        *-------|---------|-------|---------|--------------------
+                        *-------|---------|-------|---------|------------------
                         *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
                         *
                         */
                        if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                            (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
-                               /* Now we need to check if the user selected RX ONLY
-                                * of pause frames.  In this case, we had to advertise
-                                * FULL flow control because we could not advertise RX
-                                * ONLY. Hence, we must now check to see if we need to
-                                * turn OFF  the TRANSMISSION of PAUSE frames.
+                               /* Now we need to check if the user selected Rx
+                                * ONLY of pause frames.  In this case, we had
+                                * to advertise FULL flow control because we
+                                * could not advertise Rx ONLY. Hence, we must
+                                * now check to see if we need to turn OFF the
+                                * TRANSMISSION of PAUSE frames.
                                 */
                                if (hw->original_fc == E1000_FC_FULL) {
                                        hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                         *
                         *   LOCAL DEVICE  |   LINK PARTNER
                         * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-                        *-------|---------|-------|---------|--------------------
+                        *-------|---------|-------|---------|------------------
                         *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
                         *
                         */
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                         *
                         *   LOCAL DEVICE  |   LINK PARTNER
                         * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-                        *-------|---------|-------|---------|--------------------
+                        *-------|---------|-------|---------|------------------
                         *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
                         *
                         */
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                                e_dbg
                                    ("Flow Control = RX PAUSE frames only.\n");
                        }
-                       /* Per the IEEE spec, at this point flow control should be
-                        * disabled.  However, we want to consider that we could
-                        * be connected to a legacy switch that doesn't advertise
-                        * desired flow control, but can be forced on the link
-                        * partner.  So if we advertised no flow control, that is
-                        * what we will resolve to.  If we advertised some kind of
-                        * receive capability (Rx Pause Only or Full Flow Control)
-                        * and the link partner advertised none, we will configure
-                        * ourselves to enable Rx Flow Control only.  We can do
-                        * this safely for two reasons:  If the link partner really
-                        * didn't want flow control enabled, and we enable Rx, no
-                        * harm done since we won't be receiving any PAUSE frames
-                        * anyway.  If the intent on the link partner was to have
-                        * flow control enabled, then by us enabling RX only, we
-                        * can at least receive pause frames and process them.
-                        * This is a good idea because in most cases, since we are
-                        * predominantly a server NIC, more times than not we will
-                        * be asked to delay transmission of packets than asking
-                        * our link partner to pause transmission of frames.
+                       /* Per the IEEE spec, at this point flow control should
+                        * be disabled.  However, we want to consider that we
+                        * could be connected to a legacy switch that doesn't
+                        * advertise desired flow control, but can be forced on
+                        * the link partner.  So if we advertised no flow
+                        * control, that is what we will resolve to.  If we
+                        * advertised some kind of receive capability (Rx Pause
+                        * Only or Full Flow Control) and the link partner
+                        * advertised none, we will configure ourselves to
+                        * enable Rx Flow Control only.  We can do this safely
+                        * for two reasons:  If the link partner really
+                        * didn't want flow control enabled, and we enable Rx,
+                        * no harm done since we won't be receiving any PAUSE
+                        * frames anyway.  If the intent on the link partner was
+                        * to have flow control enabled, then by us enabling Rx
+                        * only, we can at least receive pause frames and
+                        * process them. This is a good idea because in most
+                        * cases, since we are predominantly a server NIC, more
+                        * times than not we will be asked to delay transmission
+                        * of packets than asking our link partner to pause
+                        * transmission of frames.
                         */
                        else if ((hw->original_fc == E1000_FC_NONE ||
                                  hw->original_fc == E1000_FC_TX_PAUSE) ||
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
        status = er32(STATUS);
        rxcw = er32(RXCW);
 
-       /*
-        * If we don't have link (auto-negotiation failed or link partner
+       /* If we don't have link (auto-negotiation failed or link partner
         * cannot auto-negotiate), and our link partner is not trying to
         * auto-negotiate with us (we are receiving idles or data),
         * we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
                        goto out;
                }
        } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-               /*
-                * If we are forcing link and we are receiving /C/ ordered
+               /* If we are forcing link and we are receiving /C/ ordered
                 * sets, re-enable auto-negotiation in the TXCW register
                 * and disable forced link in the Device Control register
                 * in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
 
                hw->serdes_has_link = true;
        } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
-               /*
-                * If we force link for non-auto-negotiation switch, check
+               /* If we force link for non-auto-negotiation switch, check
                 * link status based on MAC synchronization for internal
                 * serdes media type.
                 */
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
 
                if (phy_data & MII_SR_LINK_STATUS) {
                        hw->get_link_status = false;
-                       /* Check if there was DownShift, must be checked immediately after
-                        * link-up */
+                       /* Check if there was DownShift, must be checked
+                        * immediately after link-up
+                        */
                        e1000_check_downshift(hw);
 
                        /* If we are on 82544 or 82543 silicon and speed/duplex
-                        * are forced to 10H or 10F, then we will implement the polarity
-                        * reversal workaround.  We disable interrupts first, and upon
-                        * returning, place the devices interrupt state to its previous
-                        * value except for the link status change interrupt which will
+                        * are forced to 10H or 10F, then we will implement the
+                        * polarity reversal workaround.  We disable interrupts
+                        * first, and upon returning, place the devices
+                        * interrupt state to its previous value except for the
+                        * link status change interrupt which will
                         * happen due to the execution of this workaround.
                         */
 
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
                        }
                }
 
-               /* Configure Flow Control now that Auto-Neg has completed. First, we
-                * need to restore the desired flow control settings because we may
-                * have had to re-autoneg with a different link partner.
+               /* Configure Flow Control now that Auto-Neg has completed.
+                * First, we need to restore the desired flow control settings
+                * because we may have had to re-autoneg with a different link
+                * partner.
                 */
                ret_val = e1000_config_fc_after_link_up(hw);
                if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
                }
 
                /* At this point we know that we are on copper and we have
-                * auto-negotiated link.  These are conditions for checking the link
-                * partner capability register.  We use the link speed to determine if
-                * TBI compatibility needs to be turned on or off.  If the link is not
-                * at gigabit speed, then TBI compatibility is not needed.  If we are
-                * at gigabit speed, we turn on TBI compatibility.
+                * auto-negotiated link.  These are conditions for checking the
+                * link partner capability register.  We use the link speed to
+                * determine if TBI compatibility needs to be turned on or off.
+                * If the link is not at gigabit speed, then TBI compatibility
+                * is not needed.  If we are at gigabit speed, we turn on TBI
+                * compatibility.
                 */
                if (hw->tbi_compatibility_en) {
                        u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
                                return ret_val;
                        }
                        if (speed != SPEED_1000) {
-                               /* If link speed is not set to gigabit speed, we do not need
-                                * to enable TBI compatibility.
+                               /* If link speed is not set to gigabit speed, we
+                                * do not need to enable TBI compatibility.
                                 */
                                if (hw->tbi_compatibility_on) {
-                                       /* If we previously were in the mode, turn it off. */
+                                       /* If we previously were in the mode,
+                                        * turn it off.
+                                        */
                                        rctl = er32(RCTL);
                                        rctl &= ~E1000_RCTL_SBP;
                                        ew32(RCTL, rctl);
                                        hw->tbi_compatibility_on = false;
                                }
                        } else {
-                               /* If TBI compatibility is was previously off, turn it on. For
-                                * compatibility with a TBI link partner, we will store bad
-                                * packets. Some frames have an additional byte on the end and
+                               /* If TBI compatibility was previously off,
+                                * turn it on. For compatibility with a TBI link
+                                * partner, we will store bad packets. Some
+                                * frames have an additional byte on the end and
                                 * will look like CRC errors to to the hardware.
                                 */
                                if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
                *duplex = FULL_DUPLEX;
        }
 
-       /* IGP01 PHY may advertise full duplex operation after speed downgrade even
-        * if it is operating at half duplex.  Here we set the duplex settings to
-        * match the duplex in the link partner's capabilities.
+       /* IGP01 PHY may advertise full duplex operation after speed downgrade
+        * even if it is operating at half duplex.  Here we set the duplex
+        * settings to match the duplex in the link partner's capabilities.
         */
        if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
                ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
  */
 static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
 {
-       /* Raise the clock input to the Management Data Clock (by setting the MDC
-        * bit), and then delay 10 microseconds.
+       /* Raise the clock input to the Management Data Clock (by setting the
+        * MDC bit), and then delay 10 microseconds.
         */
        ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
        E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
  */
 static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
 {
-       /* Lower the clock input to the Management Data Clock (by clearing the MDC
-        * bit), and then delay 10 microseconds.
+       /* Lower the clock input to the Management Data Clock (by clearing the
+        * MDC bit), and then delay 10 microseconds.
         */
        ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
        E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
        ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
 
        while (mask) {
-               /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
-                * then raising and lowering the Management Data Clock. A "0" is
-                * shifted out to the PHY by setting the MDIO bit to "0" and then
-                * raising and lowering the clock.
+               /* A "1" is shifted out to the PHY by setting the MDIO bit to
+                * "1" and then raising and lowering the Management Data Clock.
+                * A "0" is shifted out to the PHY by setting the MDIO bit to
+                * "0" and then raising and lowering the clock.
                 */
                if (data & mask)
                        ctrl |= E1000_CTRL_MDIO;
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
        u8 i;
 
        /* In order to read a register from the PHY, we need to shift in a total
-        * of 18 bits from the PHY. The first two bit (turnaround) times are used
-        * to avoid contention on the MDIO pin when a read operation is performed.
-        * These two bits are ignored by us and thrown away. Bits are "shifted in"
-        * by raising the input to the Management Data Clock (setting the MDC bit),
-        * and then reading the value of the MDIO bit.
+        * of 18 bits from the PHY. The first two bit (turnaround) times are
+        * used to avoid contention on the MDIO pin when a read operation is
+        * performed. These two bits are ignored by us and thrown away. Bits are
+        * "shifted in" by raising the input to the Management Data Clock
+        * (setting the MDC bit), and then reading the value of the MDIO bit.
         */
        ctrl = er32(CTRL);
 
-       /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+       /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+        * input.
+        */
        ctrl &= ~E1000_CTRL_MDIO_DIR;
        ctrl &= ~E1000_CTRL_MDIO;
 
        ew32(CTRL, ctrl);
        E1000_WRITE_FLUSH();
 
-       /* Raise and Lower the clock before reading in the data. This accounts for
-        * the turnaround bits. The first clock occurred when we clocked out the
-        * last bit of the Register Address.
+       /* Raise and Lower the clock before reading in the data. This accounts
+        * for the turnaround bits. The first clock occurred when we clocked out
+        * the last bit of the Register Address.
         */
        e1000_raise_mdi_clk(hw, &ctrl);
        e1000_lower_mdi_clk(hw, &ctrl);
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 
        if (hw->mac_type > e1000_82543) {
                /* Set up Op-code, Phy Address, and register address in the MDI
-                * Control register.  The MAC will take care of interfacing with the
-                * PHY to retrieve the desired data.
+                * Control register.  The MAC will take care of interfacing with
+                * the PHY to retrieve the desired data.
                 */
                if (hw->mac_type == e1000_ce4100) {
                        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                        *phy_data = (u16) mdic;
                }
        } else {
-               /* We must first send a preamble through the MDIO pin to signal the
-                * beginning of an MII instruction.  This is done by sending 32
-                * consecutive "1" bits.
+               /* We must first send a preamble through the MDIO pin to signal
+                * the beginning of an MII instruction.  This is done by sending
+                * 32 consecutive "1" bits.
                 */
                e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
 
                /* Now combine the next few fields that are required for a read
                 * operation.  We use this method instead of calling the
-                * e1000_shift_out_mdi_bits routine five different times. The format of
-                * a MII read instruction consists of a shift out of 14 bits and is
-                * defined as follows:
+                * e1000_shift_out_mdi_bits routine five different times. The
+                * format of a MII read instruction consists of a shift out of
+                * 14 bits and is defined as follows:
                 *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
-                * followed by a shift in of 18 bits.  This first two bits shifted in
-                * are TurnAround bits used to avoid contention on the MDIO pin when a
-                * READ operation is performed.  These two bits are thrown away
-                * followed by a shift in of 16 bits which contains the desired data.
+                * followed by a shift in of 18 bits.  This first two bits
+                * shifted in are TurnAround bits used to avoid contention on
+                * the MDIO pin when a READ operation is performed.  These two
+                * bits are thrown away followed by a shift in of 16 bits which
+                * contains the desired data.
                 */
                mdic = ((reg_addr) | (phy_addr << 5) |
                        (PHY_OP_READ << 10) | (PHY_SOF << 12));
 
                e1000_shift_out_mdi_bits(hw, mdic, 14);
 
-               /* Now that we've shifted out the read command to the MII, we need to
-                * "shift in" the 16-bit value (18 total bits) of the requested PHY
-                * register address.
+               /* Now that we've shifted out the read command to the MII, we
+                * need to "shift in" the 16-bit value (18 total bits) of the
+                * requested PHY register address.
                 */
                *phy_data = e1000_shift_in_mdi_bits(hw);
        }
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                        }
                }
        } else {
-               /* We'll need to use the SW defined pins to shift the write command
-                * out to the PHY. We first send a preamble to the PHY to signal the
-                * beginning of the MII instruction.  This is done by sending 32
-                * consecutive "1" bits.
+               /* We'll need to use the SW defined pins to shift the write
+                * command out to the PHY. We first send a preamble to the PHY
+                * to signal the beginning of the MII instruction.  This is done
+                * by sending 32 consecutive "1" bits.
                 */
                e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
 
-               /* Now combine the remaining required fields that will indicate a
-                * write operation. We use this method instead of calling the
-                * e1000_shift_out_mdi_bits routine for each field in the command. The
-                * format of a MII write instruction is as follows:
-                * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+               /* Now combine the remaining required fields that will indicate
+                * a write operation. We use this method instead of calling the
+                * e1000_shift_out_mdi_bits routine for each field in the
+                * command. The format of a MII write instruction is as follows:
+                * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
                 */
                mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
                        (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
        e_dbg("Resetting Phy...\n");
 
        if (hw->mac_type > e1000_82543) {
-               /* Read the device control register and assert the E1000_CTRL_PHY_RST
-                * bit. Then, take it out of reset.
+               /* Read the device control register and assert the
+                * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
                 * For e1000 hardware, we delay for 10ms between the assert
-                * and deassert.
+                * and de-assert.
                 */
                ctrl = er32(CTRL);
                ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
                E1000_WRITE_FLUSH();
 
        } else {
-               /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
-                * bit to put the PHY into reset. Then, take it out of reset.
+               /* Read the Extended Device Control Register, assert the
+                * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+                * out of reset.
                 */
                ctrl_ext = er32(CTRL_EXT);
                ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
        e_dbg("e1000_phy_igp_get_info");
 
        /* The downshift status is checked only once, after link is established,
-        * and it stored in the hw->speed_downgraded parameter. */
+        * and it is stored in the hw->speed_downgraded parameter.
+        */
        phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
 
        /* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
 
        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
            IGP01E1000_PSSR_SPEED_1000MBPS) {
-               /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+               /* Local/Remote Receiver Information is only valid at 1000
+                * Mbps
+                */
                ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
                if (ret_val)
                        return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
        e_dbg("e1000_phy_m88_get_info");
 
        /* The downshift status is checked only once, after link is established,
-        * and it stored in the hw->speed_downgraded parameter. */
+        * and it is stored in the hw->speed_downgraded parameter.
+        */
        phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
 
        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
        }
 
        if (eeprom->type == e1000_eeprom_spi) {
-               /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
-                * 32KB (incremented by powers of 2).
+               /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+                * 128B to 32KB (incremented by powers of 2).
                 */
                /* Set to default value for initial eeprom read. */
                eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
                eeprom_size =
                    (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
                /* 256B eeprom size was not supported in earlier hardware, so we
-                * bump eeprom_size up one to ensure that "1" (which maps to 256B)
-                * is never the result used in the shifting logic below. */
+                * bump eeprom_size up one to ensure that "1" (which maps to
+                * 256B) is never the result used in the shifting logic below.
+                */
                if (eeprom_size)
                        eeprom_size++;
 
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
  */
 static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
 {
-       /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
-        * wait 50 microseconds.
+       /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+        * then wait 50 microseconds.
         */
        *eecd = *eecd & ~E1000_EECD_SK;
        ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
                eecd |= E1000_EECD_DO;
        }
        do {
-               /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
-                * and then raising and then lowering the clock (the SK bit controls
-                * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
-                * by setting "DI" to "0" and then raising and then lowering the clock.
+               /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+                * "1", and then raising and then lowering the clock (the SK bit
+                * controls the clock input to the EEPROM).  A "0" is shifted
+                * out to the EEPROM by setting "DI" to "0" and then raising and
+                * then lowering the clock.
                 */
                eecd &= ~E1000_EECD_DI;
 
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
 
        /* In order to read a register from the EEPROM, we need to shift 'count'
         * bits in from the EEPROM. Bits are "shifted in" by raising the clock
-        * input to the EEPROM (setting the SK bit), and then reading the value of
-        * the "DO" bit.  During this "shifting in" process the "DI" bit should
-        * always be clear.
+        * input to the EEPROM (setting the SK bit), and then reading the value
+        * of the "DO" bit.  During this "shifting in" process the "DI" bit
+        * should always be clear.
         */
 
        eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
        if (eeprom->word_size == 0)
                e1000_init_eeprom_params(hw);
 
-       /* A check for invalid values:  offset too large, too many words, and not
-        * enough words.
+       /* A check for invalid values:  offset too large, too many words, and
+        * not enough words.
         */
        if ((offset >= eeprom->word_size)
            || (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
                return -E1000_ERR_EEPROM;
 
        /* Set up the SPI or Microwire EEPROM for bit-bang reading.  We have
-        * acquired the EEPROM at this point, so any returns should release it */
+        * acquired the EEPROM at this point, so any returns should release it
+        */
        if (eeprom->type == e1000_eeprom_spi) {
                u16 word_in;
                u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
                e1000_standby_eeprom(hw);
 
-               /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+               /* Some SPI eeproms use the 8th address bit embedded in the
+                * opcode
+                */
                if ((eeprom->address_bits == 8) && (offset >= 128))
                        read_opcode |= EEPROM_A8_OPCODE_SPI;
 
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
                e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
                                        eeprom->address_bits);
 
-               /* Read the data.  The address of the eeprom internally increments with
-                * each byte (spi) being read, saving on the overhead of eeprom setup
-                * and tear-down.  The address counter will roll over if reading beyond
-                * the size of the eeprom, thus allowing the entire memory to be read
-                * starting from any offset. */
+               /* Read the data.  The address of the eeprom internally
+                * increments with each byte (spi) being read, saving on the
+                * overhead of eeprom setup and tear-down.  The address counter
+                * will roll over if reading beyond the size of the eeprom, thus
+                * allowing the entire memory to be read starting from any
+                * offset.
+                */
                for (i = 0; i < words; i++) {
                        word_in = e1000_shift_in_ee_bits(hw, 16);
                        data[i] = (word_in >> 8) | (word_in << 8);
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
                        e1000_shift_out_ee_bits(hw, (u16) (offset + i),
                                                eeprom->address_bits);
 
-                       /* Read the data.  For microwire, each word requires the overhead
-                        * of eeprom setup and tear-down. */
+                       /* Read the data.  For microwire, each word requires the
+                        * overhead of eeprom setup and tear-down.
+                        */
                        data[i] = e1000_shift_in_ee_bits(hw, 16);
                        e1000_standby_eeprom(hw);
                }
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
        if (eeprom->word_size == 0)
                e1000_init_eeprom_params(hw);
 
-       /* A check for invalid values:  offset too large, too many words, and not
-        * enough words.
+       /* A check for invalid values:  offset too large, too many words, and
+        * not enough words.
         */
        if ((offset >= eeprom->word_size)
            || (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
 
                e1000_standby_eeprom(hw);
 
-               /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+               /* Some SPI eeproms use the 8th address bit embedded in the
+                * opcode
+                */
                if ((eeprom->address_bits == 8) && (offset >= 128))
                        write_opcode |= EEPROM_A8_OPCODE_SPI;
 
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
 
                /* Send the data */
 
-               /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+               /* Loop to allow for up to whole page write (32 bytes) of
+                * eeprom
+                */
                while (widx < words) {
                        u16 word_out = data[widx];
                        word_out = (word_out >> 8) | (word_out << 8);
                        e1000_shift_out_ee_bits(hw, word_out, 16);
                        widx++;
 
-                       /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
-                        * operation, while the smaller eeproms are capable of an 8-byte
-                        * PAGE WRITE operation.  Break the inner loop to pass new address
+                       /* Some larger eeprom sizes are capable of a 32-byte
+                        * PAGE WRITE operation, while the smaller eeproms are
+                        * capable of an 8-byte PAGE WRITE operation.  Break the
+                        * inner loop to pass new address
                         */
                        if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
                                e1000_standby_eeprom(hw);
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
                /* Send the data */
                e1000_shift_out_ee_bits(hw, data[words_written], 16);
 
-               /* Toggle the CS line.  This in effect tells the EEPROM to execute
-                * the previous command.
+               /* Toggle the CS line.  This in effect tells the EEPROM to
+                * execute the previous command.
                 */
                e1000_standby_eeprom(hw);
 
-               /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
-                * signal that the command has been completed by raising the DO signal.
-                * If DO does not go high in 10 milliseconds, then error out.
+               /* Read DO repeatedly until it is high (equal to '1').  The
+                * EEPROM will signal that the command has been completed by
+                * raising the DO signal. If DO does not go high in 10
+                * milliseconds, then error out.
                 */
                for (i = 0; i < 200; i++) {
                        eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
        for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
                /* If the offset we want to clear is the same offset of the
                 * manageability VLAN ID, then clear all bits except that of the
-                * manageability unit */
+                * manageability unit
+                */
                vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
                E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
                E1000_WRITE_FLUSH();
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
         * counters overcount this packet as a CRC error and undercount
         * the packet as a good packet
         */
-       /* This packet should not be counted as a CRC error.    */
+       /* This packet should not be counted as a CRC error. */
        stats->crcerrs--;
-       /* This packet does count as a Good Packet Received.    */
+       /* This packet does count as a Good Packet Received. */
        stats->gprc++;
 
-       /* Adjust the Good Octets received counters             */
+       /* Adjust the Good Octets received counters */
        carry_bit = 0x80000000 & stats->gorcl;
        stats->gorcl += frame_len;
        /* If the high bit of Gorcl (the low 32 bits of the Good Octets
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
                if (ret_val)
                        return ret_val;
 
-               /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
-                * find the polarity status */
+               /* If speed is 1000 Mbps, must read the
+                * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
+                */
                if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
                    IGP01E1000_PSSR_SPEED_1000MBPS) {
 
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
                            e1000_rev_polarity_reversed :
                            e1000_rev_polarity_normal;
                } else {
-                       /* For 10 Mbps, read the polarity bit in the status register. (for
-                        * 100 Mbps this bit is always 0) */
+                       /* For 10 Mbps, read the polarity bit in the status
+                        * register. (for 100 Mbps this bit is always 0)
+                        */
                        *polarity =
                            (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
                            e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                }
        } else {
                if (hw->dsp_config_state == e1000_dsp_config_activated) {
-                       /* Save off the current value of register 0x2F5B to be restored at
-                        * the end of the routines. */
+                       /* Save off the current value of register 0x2F5B to be
+                        * restored at the end of the routines.
+                        */
                        ret_val =
                            e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                        msleep(20);
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                                     IGP01E1000_IEEE_FORCE_GIGA);
+                                                   IGP01E1000_IEEE_FORCE_GIGA);
                        if (ret_val)
                                return ret_val;
                        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                        }
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                                     IGP01E1000_IEEE_RESTART_AUTONEG);
+                                       IGP01E1000_IEEE_RESTART_AUTONEG);
                        if (ret_val)
                                return ret_val;
 
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                }
 
                if (hw->ffe_config_state == e1000_ffe_config_active) {
-                       /* Save off the current value of register 0x2F5B to be restored at
-                        * the end of the routines. */
+                       /* Save off the current value of register 0x2F5B to be
+                        * restored at the end of the routines.
+                        */
                        ret_val =
                            e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                        msleep(20);
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                                     IGP01E1000_IEEE_FORCE_GIGA);
+                                                   IGP01E1000_IEEE_FORCE_GIGA);
                        if (ret_val)
                                return ret_val;
                        ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                                return ret_val;
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                                     IGP01E1000_IEEE_RESTART_AUTONEG);
+                                       IGP01E1000_IEEE_RESTART_AUTONEG);
                        if (ret_val)
                                return ret_val;
 
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
                return E1000_SUCCESS;
 
        /* During driver activity LPLU should not be used or it will attain link
-        * from the lowest speeds starting from 10Mbps. The capability is used for
-        * Dx transitions and states */
+        * from the lowest speeds starting from 10Mbps. The capability is used
+        * for Dx transitions and states
+        */
        if (hw->mac_type == e1000_82541_rev_2
            || hw->mac_type == e1000_82547_rev_2) {
                ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
                                return ret_val;
                }
 
-               /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
-                * Dx states where the power conservation is most important.  During
-                * driver activity we should enable SmartSpeed, so performance is
-                * maintained. */
+               /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
                if (hw->smart_speed == e1000_smart_speed_on) {
                        ret_val =
                            e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
index b20fff1..8502c62 100644 (file)
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
  * e1000_init_module is the first routine called when the driver is
  * loaded. All it does is register with the PCI subsystem.
  **/
-
 static int __init e1000_init_module(void)
 {
        int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
  * e1000_exit_module is called just before the driver is removed
  * from memory.
  **/
-
 static void __exit e1000_exit_module(void)
 {
        pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
  * e1000_irq_disable - Mask off interrupt generation on the NIC
  * @adapter: board private structure
  **/
-
 static void e1000_irq_disable(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
  * e1000_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  **/
-
 static void e1000_irq_enable(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
        e1000_configure_rx(adapter);
        /* call E1000_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
-        * next_to_use != next_to_clean */
+        * next_to_use != next_to_clean
+        */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
                adapter->alloc_rx_buf(adapter, ring,
-                                     E1000_DESC_UNUSED(ring));
+                                     E1000_DESC_UNUSED(ring));
        }
 }
 
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
  * The phy may be powered down to save power and turn off link when the
  * driver is unloaded and wake on lan is not enabled (among others)
  * *** this routine MUST be followed by a call to e1000_reset ***
- *
  **/
-
 void e1000_power_up_phy(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
        /* Just clear the power down bit to wake the phy back up */
        if (hw->media_type == e1000_media_type_copper) {
                /* according to the manual, the phy will retain its
-                * settings across a power-down/up cycle */
+                * settings across a power-down/up cycle
+                */
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg &= ~MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
         * The PHY cannot be powered down if any of the following is true *
         * (a) WoL is enabled
         * (b) AMT is active
-        * (c) SoL/IDER session is active */
+        * (c) SoL/IDER session is active
+        */
        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
           hw->media_type == e1000_media_type_copper) {
                u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
 
        e1000_irq_disable(adapter);
 
-       /*
-        * Setting DOWN must be after irq_disable to prevent
+       /* Setting DOWN must be after irq_disable to prevent
         * a screaming interrupt.  Setting DOWN also prevents
         * tasks from rescheduling.
         */
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
-                * expressed in KB. */
+                * expressed in KB.
+                */
                pba = er32(PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
-               /*
-                * the tx fifo also stores 16 bytes of information about the tx
+               /* the Tx fifo also stores 16 bytes of information about the Tx
                 * but don't include ethernet FCS because hardware appends it
                 */
                min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
 
                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
-                * allocation, take space away from current Rx allocation */
+                * allocation, take space away from current Rx allocation
+                */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
                                break;
                        }
 
-                       /* if short on rx space, rx wins and must trump tx
-                        * adjustment or use Early Receive if available */
+                       /* if short on Rx space, Rx wins and must trump Tx
+                        * adjustment or use Early Receive if available
+                        */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
 
        ew32(PBA, pba);
 
-       /*
-        * flow control settings:
+       /* flow control settings:
         * The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
                u32 ctrl = er32(CTRL);
                /* clear phy power management bit if we are in gig only mode,
                 * which if enabled will attempt negotiation to 100Mb, which
-                * can cause a loss of link at power off or driver unload */
+                * can cause a loss of link at power off or driver unload
+                */
                ctrl &= ~E1000_CTRL_SWDPIN3;
                ew32(CTRL, ctrl);
        }
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
 static netdev_features_t e1000_fix_features(struct net_device *netdev,
        netdev_features_t features)
 {
-       /*
-        * Since there is no support for separate rx/tx vlan accel
-        * enable/disable make sure tx flag is always in same state as rx.
+       /* Since there is no support for separate Rx/Tx vlan accel
+        * enable/disable make sure Tx flag is always in same state as Rx.
         */
        if (features & NETIF_F_HW_VLAN_RX)
                features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
-       /*
-        * there is a workaround being applied below that limits
+       /* there is a workaround being applied below that limits
         * 64-bit DMA addresses to 64-bit hardware.  There are some
         * 32-bit adapters that Tx hang when given 64-bit DMA addresses
         */
        pci_using_dac = 0;
        if ((hw->bus_type == e1000_bus_type_pcix) &&
            !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               /*
-                * according to DMA-API-HOWTO, coherent calls will always
+               /* according to DMA-API-HOWTO, coherent calls will always
                 * succeed if the set call did
                 */
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* before reading the EEPROM, reset the controller to
-        * put the device in a known good starting state */
+        * put the device in a known good starting state
+        */
 
        e1000_reset_hw(hw);
 
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (e1000_validate_eeprom_checksum(hw) < 0) {
                e_err(probe, "The EEPROM Checksum Is Not Valid\n");
                e1000_dump_eeprom(adapter);
-               /*
-                * set MAC address to all zeroes to invalidate and temporary
+               /* set MAC address to all zeroes to invalidate and temporary
                 * disable this device for the user. This blocks regular
                 * traffic while still permitting ethtool ioctls from reaching
                 * the hardware as well as allowing the user to run the
@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* now that we have the eeprom settings, apply the special cases
         * where the eeprom may be wrong or the board simply won't support
-        * wake on lan on a particular port */
+        * wake on lan on a particular port
+        */
        switch (pdev->device) {
        case E1000_DEV_ID_82546GB_PCIE:
                adapter->eeprom_wol = 0;
@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
                /* Wake events only supported on port A for dual fiber
-                * regardless of eeprom setting */
+                * regardless of eeprom setting
+                */
                if (er32(STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
@@ -1270,7 +1267,6 @@ err_pci_reg:
  * Hot-Plug event, or because the driver is going to be removed from
  * memory.
  **/
-
 static void e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
  * e1000_sw_init initializes the Adapter private data structure.
  * e1000_init_hw_struct MUST be called before this function
  **/
-
 static int e1000_sw_init(struct e1000_adapter *adapter)
 {
        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
  * We allocate one ring per queue at run-time since we don't know the
  * number of queues at compile-time.
  **/
-
 static int e1000_alloc_queues(struct e1000_adapter *adapter)
 {
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
  * handler is registered with the OS, the watchdog task is started,
  * and the stack is notified that the interface is ready.
  **/
-
 static int e1000_open(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
-        * clean_rx handler before we do so.  */
+        * clean_rx handler before we do so.
+        */
        e1000_configure(adapter);
 
        err = e1000_request_irq(adapter);
@@ -1444,7 +1438,6 @@ err_setup_tx:
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-
 static int e1000_close(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
        e1000_free_all_rx_resources(adapter);
 
        /* kill manageability vlan ID if supported, but not if a vlan with
-        * the same ID is registered on the host OS (let 8021q kill it) */
+        * the same ID is registered on the host OS (let 8021q kill it)
+        */
        if ((hw->mng_cookie.status &
-                         E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-            !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+           !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }
 
@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
        unsigned long end = begin + len;
 
        /* First rev 82545 and 82546 need to not allow any memory
-        * write location to cross 64k boundary due to errata 23 */
+        * write location to cross 64k boundary due to errata 23
+        */
        if (hw->mac_type == e1000_82545 ||
            hw->mac_type == e1000_ce4100 ||
            hw->mac_type == e1000_82546) {
@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
  *
  * Return 0 on success, negative on failure
  **/
-
 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr)
 {
@@ -1509,11 +1503,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 
        size = sizeof(struct e1000_buffer) * txdr->count;
        txdr->buffer_info = vzalloc(size);
-       if (!txdr->buffer_info) {
-               e_err(probe, "Unable to allocate memory for the Tx descriptor "
-                     "ring\n");
+       if (!txdr->buffer_info)
                return -ENOMEM;
-       }
 
        /* round up to nearest 4K */
 
@@ -1577,7 +1568,6 @@ setup_tx_desc_die:
  *
  * Return 0 on success, negative on failure
  **/
-
 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 {
        int i, err = 0;
@@ -1602,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
  *
  * Configure the Tx unit of the MAC after a reset.
  **/
-
 static void e1000_configure_tx(struct e1000_adapter *adapter)
 {
        u64 tdba;
@@ -1623,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
                ew32(TDT, 0);
                ew32(TDH, 0);
-               adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
-               adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+               adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+                                          E1000_TDH : E1000_82542_TDH);
+               adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+                                          E1000_TDT : E1000_82542_TDT);
                break;
        }
 
@@ -1679,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                adapter->txd_cmd |= E1000_TXD_CMD_RS;
 
        /* Cache if we're 82544 running in PCI-X because we'll
-        * need this to apply a workaround later in the send path. */
+        * need this to apply a workaround later in the send path.
+        */
        if (hw->mac_type == e1000_82544 &&
            hw->bus_type == e1000_bus_type_pcix)
                adapter->pcix_82544 = true;
@@ -1695,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
  *
  * Returns 0 on success, negative on failure
  **/
-
 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rxdr)
 {
@@ -1704,11 +1695,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 
        size = sizeof(struct e1000_buffer) * rxdr->count;
        rxdr->buffer_info = vzalloc(size);
-       if (!rxdr->buffer_info) {
-               e_err(probe, "Unable to allocate memory for the Rx descriptor "
-                     "ring\n");
+       if (!rxdr->buffer_info)
                return -ENOMEM;
-       }
 
        desc_len = sizeof(struct e1000_rx_desc);
 
@@ -1777,7 +1765,6 @@ setup_rx_desc_die:
  *
  * Return 0 on success, negative on failure
  **/
-
 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 {
        int i, err = 0;
@@ -1846,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        /* This is useful for sniffing bad packets. */
        if (adapter->netdev->features & NETIF_F_RXALL) {
                /* UPE and MPE will be handled by normal PROMISC logic
-                * in e1000e_set_rx_mode */
+                * in e1000e_set_rx_mode
+                */
                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1868,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
  *
  * Configure the Rx unit of the MAC after a reset.
  **/
-
 static void e1000_configure_rx(struct e1000_adapter *adapter)
 {
        u64 rdba;
@@ -1901,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
        }
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
+        * the Base and Length of the Rx Descriptor Ring
+        */
        switch (adapter->num_rx_queues) {
        case 1:
        default:
@@ -1911,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
                ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
                ew32(RDT, 0);
                ew32(RDH, 0);
-               adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
-               adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+               adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+                                          E1000_RDH : E1000_82542_RDH);
+               adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+                                          E1000_RDT : E1000_82542_RDT);
                break;
        }
 
@@ -1938,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
  *
  * Free all transmit software resources
  **/
-
 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *tx_ring)
 {
@@ -1961,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
  *
  * Free all transmit software resources
  **/
-
 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
 {
        int i;
@@ -1996,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
  * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-
 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring)
 {
@@ -2032,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
  * @adapter: board private structure
  **/
-
 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
 {
        int i;
@@ -2048,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
  *
  * Free all receive software resources
  **/
-
 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring)
 {
@@ -2071,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
  *
  * Free all receive software resources
  **/
-
 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
 {
        int i;
@@ -2085,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
  * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-
 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring)
 {
@@ -2144,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
  * @adapter: board private structure
  **/
-
 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
 {
        int i;
@@ -2204,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
  *
  * Returns 0 on success, negative on failure
  **/
-
 static int e1000_set_mac(struct net_device *netdev, void *p)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2239,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
  * responsible for configuring the hardware for proper unicast, multicast,
  * promiscuous mode, and all-multi behavior.
  **/
-
 static void e1000_set_rx_mode(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2252,10 +2232,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
        int mta_reg_count = E1000_NUM_MTA_REGISTERS;
        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
-       if (!mcarray) {
-               e_err(probe, "memory allocation failed\n");
+       if (!mcarray)
                return;
-       }
 
        /* Check for Promiscuous and All Multicast modes */
 
@@ -2325,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
        }
 
        /* write the hash table completely, write from bottom to avoid
-        * both stupid write combining chipsets, and flushing each write */
+        * both stupid write combining chipsets, and flushing each write
+        */
        for (i = mta_reg_count - 1; i >= 0 ; i--) {
-               /*
-                * If we are on an 82544 has an errata where writing odd
+               /* If we are on an 82544, it has an errata where writing odd
                 * offsets overwrites the previous even offset, but writing
                 * backwards over the range solves the issue by always
                 * writing the odd offset first
@@ -2466,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
                        bool txb2b = true;
                        /* update snapshot of PHY registers on LSC */
                        e1000_get_speed_and_duplex(hw,
-                                                  &adapter->link_speed,
-                                                  &adapter->link_duplex);
+                                                  &adapter->link_speed,
+                                                  &adapter->link_duplex);
 
                        ctrl = er32(CTRL);
                        pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2541,7 +2519,8 @@ link_up:
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
-                        * (Do the reset outside of interrupt context). */
+                        * (Do the reset outside of interrupt context).
+                        */
                        adapter->tx_timeout_count++;
                        schedule_work(&adapter->reset_task);
                        /* exit immediately since reset is imminent */
@@ -2551,8 +2530,7 @@ link_up:
 
        /* Simple mode for Interrupt Throttle Rate (ITR) */
        if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
-               /*
-                * Symmetric Tx/Rx gets a reduced ITR=2000;
+               /* Symmetric Tx/Rx gets a reduced ITR=2000;
                 * Total asymmetrical Tx or Rx gets ITR=8000;
                 * everyone else is between 2000-8000.
                 */
@@ -2667,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
                goto set_itr_now;
        }
 
-       adapter->tx_itr = e1000_update_itr(adapter,
-                                   adapter->tx_itr,
-                                   adapter->total_tx_packets,
-                                   adapter->total_tx_bytes);
+       adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
+                                          adapter->total_tx_packets,
+                                          adapter->total_tx_bytes);
        /* conservative mode (itr 3) eliminates the lowest_latency setting */
        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
                adapter->tx_itr = low_latency;
 
-       adapter->rx_itr = e1000_update_itr(adapter,
-                                   adapter->rx_itr,
-                                   adapter->total_rx_packets,
-                                   adapter->total_rx_bytes);
+       adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
+                                          adapter->total_rx_packets,
+                                          adapter->total_rx_bytes);
        /* conservative mode (itr 3) eliminates the lowest_latency setting */
        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
                adapter->rx_itr = low_latency;
@@ -2704,10 +2680,11 @@ set_itr_now:
        if (new_itr != adapter->itr) {
                /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
-                * increasing */
+                * increasing
+                */
                new_itr = new_itr > adapter->itr ?
-                            min(adapter->itr + (new_itr >> 2), new_itr) :
-                            new_itr;
+                         min(adapter->itr + (new_itr >> 2), new_itr) :
+                         new_itr;
                adapter->itr = new_itr;
                ew32(ITR, 1000000000 / (new_itr * 256));
        }
@@ -2869,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                /* Workaround for Controller erratum --
                 * descriptor for non-tso packet in a linear SKB that follows a
                 * tso gets written back prematurely before the data is fully
-                * DMA'd to the controller */
+                * DMA'd to the controller
+                */
                if (!skb->data_len && tx_ring->last_tx_tso &&
                    !skb_is_gso(skb)) {
                        tx_ring->last_tx_tso = false;
@@ -2877,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                }
 
                /* Workaround for premature desc write-backs
-                * in TSO mode.  Append 4-byte sentinel desc */
+                * in TSO mode.  Append 4-byte sentinel desc
+                */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;
                /* work-around for errata 10 and it applies
@@ -2890,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        size = 2015;
 
                /* Workaround for potential 82544 hang in PCI-X.  Avoid
-                * terminating buffers within evenly-aligned dwords. */
+                * terminating buffers within evenly-aligned dwords.
+                */
                if (unlikely(adapter->pcix_82544 &&
                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
                   size > 4))
@@ -2902,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                buffer_info->mapped_as_page = false;
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  skb->data + offset,
-                                                 size, DMA_TO_DEVICE);
+                                                 size, DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
                buffer_info->next_to_watch = i;
@@ -2933,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, max_per_txd);
                        /* Workaround for premature desc write-backs
-                        * in TSO mode.  Append 4-byte sentinel desc */
-                       if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+                        * in TSO mode.  Append 4-byte sentinel desc
+                        */
+                       if (unlikely(mss && f == (nr_frags-1) &&
+                           size == len && size > 8))
                                size -= 4;
                        /* Workaround for potential 82544 hang in PCI-X.
                         * Avoid terminating buffers within evenly-aligned
-                        * dwords. */
+                        * dwords.
+                        */
                        bufend = (unsigned long)
                                page_to_phys(skb_frag_page(frag));
                        bufend += offset + size - 1;
@@ -3002,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 
        if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
-                            E1000_TXD_CMD_TSE;
+                            E1000_TXD_CMD_TSE;
                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
 
                if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3043,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
-        * such as IA-64). */
+        * such as IA-64).
+        */
        wmb();
 
        tx_ring->next_to_use = i;
        writel(i, hw->hw_addr + tx_ring->tdt);
        /* we need this if more than one processor can write to our tail
-        * at a time, it syncronizes IO on IA64/Altix systems */
+        * at a time, it synchronizes IO on IA64/Altix systems
+        */
        mmiowb();
 }
 
@@ -3098,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
-        * but since that doesn't exist yet, just open code it. */
+        * but since that doesn't exist yet, just open code it.
+        */
        smp_mb();
 
        /* We need to check again in a case another CPU has just
-        * made room available. */
+        * made room available.
+        */
        if (likely(E1000_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;
 
@@ -3113,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 }
 
 static int e1000_maybe_stop_tx(struct net_device *netdev,
-                               struct e1000_tx_ring *tx_ring, int size)
+                              struct e1000_tx_ring *tx_ring, int size)
 {
        if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
                return 0;
@@ -3137,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        int tso;
        unsigned int f;
 
-       /* This goes back to the question of how to logically map a tx queue
+       /* This goes back to the question of how to logically map a Tx queue
         * to a flow.  Right now, performance is impacted slightly negatively
-        * if using multiple tx queues.  If the stack breaks away from a
-        * single qdisc implementation, we can look at this again. */
+        * if using multiple Tx queues.  If the stack breaks away from a
+        * single qdisc implementation, we can look at this again.
+        */
        tx_ring = adapter->tx_ring;
 
        if (unlikely(skb->len <= 0)) {
@@ -3165,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
         * initiating the DMA for each buffer.  The calc is:
         * 4 = ceil(buffer len/mss).  To make sure we don't
         * overrun the FIFO, adjust the max buffer len if mss
-        * drops. */
+        * drops.
+        */
        if (mss) {
                u8 hdr_len;
                max_per_txd = min(mss << 2, max_per_txd);
@@ -3181,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                 * this hardware's requirements
                                 * NOTE: this is a TSO only workaround
                                 * if end byte alignment not correct move us
-                                * into the next dword */
-                               if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+                                * into the next dword
+                                */
+                               if ((unsigned long)(skb_tail_pointer(skb) - 1)
+                                   & 4)
                                        break;
                                /* fall through */
                                pull_size = min((unsigned int)4, skb->data_len);
@@ -3230,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                count += nr_frags;
 
        /* need: count + 2 desc gap to keep tail from touching
-        * head, otherwise try next time */
+        * head, otherwise try next time
+        */
        if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
                return NETDEV_TX_BUSY;
 
@@ -3269,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
        count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
-                            nr_frags, mss);
+                            nr_frags, mss);
 
        if (count) {
                netdev_sent_queue(netdev, skb->len);
@@ -3371,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
        /* Print Registers */
        e1000_regdump(adapter);
 
-       /*
-        * transmit dump
-        */
+       /* transmit dump */
        pr_info("TX Desc ring0 dump\n");
 
        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3434,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
        }
 
 rx_ring_summary:
-       /*
-        * receive dump
-        */
+       /* receive dump */
        pr_info("\nRX Desc ring dump\n");
 
        /* Legacy Receive Descriptor Format
@@ -3501,7 +3489,6 @@ exit:
  * e1000_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
  **/
-
 static void e1000_tx_timeout(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3529,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the watchdog.
  **/
-
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 {
        /* only return the current stats */
@@ -3543,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
  *
  * Returns 0 on success, negative on failure
  **/
-
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3580,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
         * i.e. RXBUFFER_2048 --> size-4096 slab
-        *  however with the new *_jumbo_rx* routines, jumbo receives will use
-        *  fragmented skbs */
+        * however with the new *_jumbo_rx* routines, jumbo receives will use
+        * fragmented skbs
+        */
 
        if (max_frame <= E1000_RXBUFFER_2048)
                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@@ -3616,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
  * e1000_update_stats - Update the board statistics counters
  * @adapter: board private structure
  **/
-
 void e1000_update_stats(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -3627,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
-       /*
-        * Prevent stats update while adapter is being reset, or if the pci
+       /* Prevent stats update while adapter is being reset, or if the pci
         * connection is down.
         */
        if (adapter->link_speed == 0)
@@ -3718,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
        /* Rx Errors */
 
        /* RLEC on some newer hardware can be incorrect so build
-       * our own version based on RUC and ROC */
+        * our own version based on RUC and ROC
+        */
        netdev->stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.ruc + adapter->stats.roc +
@@ -3772,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
  **/
-
 static irqreturn_t e1000_intr(int irq, void *data)
 {
        struct net_device *netdev = data;
@@ -3783,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
        if (unlikely((!icr)))
                return IRQ_NONE;  /* Not our interrupt */
 
-       /*
-        * we might have caused the interrupt, but the above
+       /* we might have caused the interrupt, but the above
         * read cleared it, and just in case the driver is
         * down there is nothing to do so return handled
         */
@@ -3810,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
                __napi_schedule(&adapter->napi);
        } else {
                /* this really should not happen! if it does it is basically a
-                * bug, but not a hard error, so enable ints and continue */
+                * bug, but not a hard error, so enable ints and continue
+                */
                if (!test_bit(__E1000_DOWN, &adapter->flags))
                        e1000_irq_enable(adapter);
        }
@@ -3824,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
  **/
 static int e1000_clean(struct napi_struct *napi, int budget)
 {
-       struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+       struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+                                                    napi);
        int tx_clean_complete = 0, work_done = 0;
 
        tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3915,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
        if (adapter->detect_tx_hung) {
                /* Detect a transmit hang in hardware, this serializes the
-                * check with the clearing of time_stamp and movement of i */
+                * check with the clearing of time_stamp and movement of i
+                */
                adapter->detect_tx_hung = false;
                if (tx_ring->buffer_info[eop].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
-                              (adapter->tx_timeout_factor * HZ)) &&
+                              (adapter->tx_timeout_factor * HZ)) &&
                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
                        /* detected Tx unit hang */
@@ -3962,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
  * @csum:        receive descriptor csum field
  * @sk_buff:     socket buffer with received data
  **/
-
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                              u32 csum, struct sk_buff *skb)
 {
@@ -3998,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
  * e1000_consume_page - helper function
  **/
 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
-                               u16 length)
+                              u16 length)
 {
        bi->page = NULL;
        skb->len += length;
@@ -4094,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                        if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
                                       last_byte)) {
                                spin_lock_irqsave(&adapter->stats_lock,
-                                                 irq_flags);
+                                                 irq_flags);
                                e1000_tbi_adjust_stats(hw, &adapter->stats,
                                                       length, mapped);
                                spin_unlock_irqrestore(&adapter->stats_lock,
-                                                      irq_flags);
+                                                      irq_flags);
                                length--;
                        } else {
                                if (netdev->features & NETIF_F_RXALL)
@@ -4106,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                /* recycle both page and skb */
                                buffer_info->skb = skb;
                                /* an error means any chain goes out the window
-                                * too */
+                                * too
+                                */
                                if (rx_ring->rx_skb_top)
                                        dev_kfree_skb(rx_ring->rx_skb_top);
                                rx_ring->rx_skb_top = NULL;
@@ -4122,7 +4108,7 @@ process_skb:
                                /* this is the beginning of a chain */
                                rxtop = skb;
                                skb_fill_page_desc(rxtop, 0, buffer_info->page,
-                                                  0, length);
+                                                  0, length);
                        } else {
                                /* this is the middle of a chain */
                                skb_fill_page_desc(rxtop,
@@ -4140,38 +4126,42 @@ process_skb:
                                    skb_shinfo(rxtop)->nr_frags,
                                    buffer_info->page, 0, length);
                                /* re-use the current skb, we only consumed the
-                                * page */
+                                * page
+                                */
                                buffer_info->skb = skb;
                                skb = rxtop;
                                rxtop = NULL;
                                e1000_consume_page(buffer_info, skb, length);
                        } else {
                                /* no chain, got EOP, this buf is the packet
-                                * copybreak to save the put_page/alloc_page */
+                                * copybreak to save the put_page/alloc_page
+                                */
                                if (length <= copybreak &&
                                    skb_tailroom(skb) >= length) {
                                        u8 *vaddr;
                                        vaddr = kmap_atomic(buffer_info->page);
-                                       memcpy(skb_tail_pointer(skb), vaddr, length);
+                                       memcpy(skb_tail_pointer(skb), vaddr,
+                                              length);
                                        kunmap_atomic(vaddr);
                                        /* re-use the page, so don't erase
-                                        * buffer_info->page */
+                                        * buffer_info->page
+                                        */
                                        skb_put(skb, length);
                                } else {
                                        skb_fill_page_desc(skb, 0,
-                                                          buffer_info->page, 0,
-                                                          length);
+                                                          buffer_info->page, 0,
+                                                          length);
                                        e1000_consume_page(buffer_info, skb,
-                                                          length);
+                                                          length);
                                }
                        }
                }
 
                /* Receive Checksum Offload XXX recompute due to CRC strip? */
                e1000_rx_checksum(adapter,
-                                 (u32)(status) |
-                                 ((u32)(rx_desc->errors) << 24),
-                                 le16_to_cpu(rx_desc->csum), skb);
+                                 (u32)(status) |
+                                 ((u32)(rx_desc->errors) << 24),
+                                 le16_to_cpu(rx_desc->csum), skb);
 
                total_rx_bytes += (skb->len - 4); /* don't count FCS */
                if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4213,8 +4203,7 @@ next_desc:
        return cleaned;
 }
 
-/*
- * this should improve performance for small packets with large amounts
+/* this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
 static void e1000_check_copybreak(struct net_device *netdev,
@@ -4318,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                       last_byte)) {
                                spin_lock_irqsave(&adapter->stats_lock, flags);
                                e1000_tbi_adjust_stats(hw, &adapter->stats,
-                                                      length, skb->data);
+                                                      length, skb->data);
                                spin_unlock_irqrestore(&adapter->stats_lock,
-                                                      flags);
+                                                      flags);
                                length--;
                        } else {
                                if (netdev->features & NETIF_F_RXALL)
@@ -4385,10 +4374,9 @@ next_desc:
  * @rx_ring: pointer to receive ring structure
  * @cleaned_count: number of buffers to allocate this pass
  **/
-
 static void
 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
-                             struct e1000_rx_ring *rx_ring, int cleaned_count)
+                            struct e1000_rx_ring *rx_ring, int cleaned_count)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -4429,7 +4417,7 @@ check_page:
 
                if (!buffer_info->dma) {
                        buffer_info->dma = dma_map_page(&pdev->dev,
-                                                       buffer_info->page, 0,
+                                                       buffer_info->page, 0,
                                                        buffer_info->length,
                                                        DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4459,7 +4447,8 @@ check_page:
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
-                * such as IA-64). */
+                * such as IA-64).
+                */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->rdt);
        }
@@ -4469,7 +4458,6 @@ check_page:
  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  * @adapter: address of board private structure
  **/
-
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count)
@@ -4540,8 +4528,7 @@ map_skb:
                        break; /* while !buffer_info->skb */
                }
 
-               /*
-                * XXX if it was allocated cleanly it will never map to a
+               /* XXX if it was allocated cleanly it will never map to a
                 * boundary crossing
                 */
 
@@ -4579,7 +4566,8 @@ map_skb:
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
-                * such as IA-64). */
+                * such as IA-64).
+                */
                wmb();
                writel(i, hw->hw_addr + rx_ring->rdt);
        }
@@ -4589,7 +4577,6 @@ map_skb:
  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
  * @adapter:
  **/
-
 static void e1000_smartspeed(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -4602,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
 
        if (adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
-                * we assume back-to-back */
+                * we assume back-to-back
+                */
                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4615,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
                        adapter->smartspeed++;
                        if (!e1000_phy_setup_autoneg(hw) &&
                           !e1000_read_phy_reg(hw, PHY_CTRL,
-                                              &phy_ctrl)) {
+                                              &phy_ctrl)) {
                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
                                             MII_CR_RESTART_AUTO_NEG);
                                e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4646,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
  * @ifreq:
  * @cmd:
  **/
-
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
        switch (cmd) {
@@ -4665,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  * @ifreq:
  * @cmd:
  **/
-
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd)
 {
@@ -4927,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        hw->autoneg = 0;
 
        /* Make sure dplx is at most 1 bit and lsb of speed is not set
-        * for the switch() below to work */
+        * for the switch() below to work
+        */
        if ((spd & 1) || (dplx & ~1))
                goto err_inval;
 
@@ -5130,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
  * the interrupt routine is executing.
  */
index 750fc01..c9cde35 100644 (file)
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
  * value exists, a default value is used.  The final value is stored
  * in a variable in the adapter structure.
  **/
-
 void e1000_check_options(struct e1000_adapter *adapter)
 {
        struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
                        .def  = E1000_DEFAULT_RXD,
                        .arg  = { .r = {
                                .min = E1000_MIN_RXD,
-                               .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+                               .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+                                      E1000_MAX_82544_RXD
                        }}
                };
 
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
                if (num_TxAbsIntDelay > bd) {
                        adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
                        e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
-                                             adapter);
+                                             adapter);
                } else {
                        adapter->tx_abs_int_delay = opt.def;
                }
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
                if (num_RxIntDelay > bd) {
                        adapter->rx_int_delay = RxIntDelay[bd];
                        e1000_validate_option(&adapter->rx_int_delay, &opt,
-                                             adapter);
+                                             adapter);
                } else {
                        adapter->rx_int_delay = opt.def;
                }
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
                if (num_RxAbsIntDelay > bd) {
                        adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
                        e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
-                                             adapter);
+                                             adapter);
                } else {
                        adapter->rx_abs_int_delay = opt.def;
                }
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
                                break;
                        case 4:
                                e_dev_info("%s set to simplified "
-                                          "(2000-8000) ints mode\n", opt.name);
+                                          "(2000-8000) ints mode\n", opt.name);
                                adapter->itr_setting = adapter->itr;
                                break;
                        default:
                                e1000_validate_option(&adapter->itr, &opt,
-                                       adapter);
+                                                     adapter);
                                /* save the setting, because the dynamic bits
                                 * change itr.
                                 * clear the lower two bits because they are
-                                * used as control */
+                                * used as control
+                                */
                                adapter->itr_setting = adapter->itr & ~3;
                                break;
                        }
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
  *
  * Handles speed and duplex options on fiber adapters
  **/
-
 static void e1000_check_fiber_options(struct e1000_adapter *adapter)
 {
        int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
  *
  * Handles speed and duplex options on copper adapters
  **/
-
 static void e1000_check_copper_options(struct e1000_adapter *adapter)
 {
        struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
                e_dev_info("Using Autonegotiation at Half Duplex only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
-                                                ADVERTISE_100_HALF;
+                                                ADVERTISE_100_HALF;
                break;
        case FULL_DUPLEX:
                e_dev_info("Full Duplex specified without Speed\n");
                e_dev_info("Using Autonegotiation at Full Duplex only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
-                                                ADVERTISE_100_FULL |
-                                                ADVERTISE_1000_FULL;
+                                                ADVERTISE_100_FULL |
+                                                ADVERTISE_1000_FULL;
                break;
        case SPEED_10:
                e_dev_info("10 Mbps Speed specified without Duplex\n");
                e_dev_info("Using Autonegotiation at 10 Mbps only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
-                                                ADVERTISE_10_FULL;
+                                                ADVERTISE_10_FULL;
                break;
        case SPEED_10 + HALF_DUPLEX:
                e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
                e_dev_info("Using Autonegotiation at 100 Mbps only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
-                                                ADVERTISE_100_FULL;
+                                                ADVERTISE_100_FULL;
                break;
        case SPEED_100 + HALF_DUPLEX:
                e_dev_info("Forcing to 100 Mbps Half Duplex\n");
index a00457a..e099138 100644 (file)
 
 #include "e1000.h"
 
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL      0x00
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL       0x02
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL        0x10
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE         0x1F
-
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS   0x0008
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS   0x0800
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING  0x0010
-
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT  0x0000
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE                 0x2000
-
-#define E1000_KMRNCTRLSTA_OPMODE_MASK           0x000C
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO    0x0004
-
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN       0x00010000
-
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN      0x8
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN    0x9
-
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Reversal Disab. */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI                 0x0000 /* 00=Manual MDI */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Auto crossover */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG          0x2000
-                                               /* 1=Reverse Auto-Negotiation */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK                0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5          0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25          0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_25                 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX           0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M
-                                                          1 = 50-80M
-                                                          2 = 80-110M
-                                                          3 = 110-140M
-                                                          4 = >140M
-                                                       */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PASS_FALSE_CARRIER                 0x0800
-
-/* Max number of times Kumeran read/write should be validated */
-#define GG82563_MAX_KMRN_RETRY  0x5
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE     0x0001
-                                          /* 1=Enable SERDES Electrical Idle */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING                         0x0010 /* Disable Padding */
-
 /* A table for the GG82563 cable length where the range is defined
  * with a lower bound at "index" and the upper bound at
  * "index + 5".
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
new file mode 100644 (file)
index 0000000..90d363b
--- /dev/null
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL     0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL      0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL       0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE        0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS  0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS  0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE                0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK          0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO   0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00    /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN      0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN     0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN   0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002  /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK       0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI                0x0000  /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX       0x0020  /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO       0x0060  /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG         0x2000  /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK               0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5         0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25         0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25                0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX          0x0010  /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH              0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER                0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY                 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE    0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                        0x0010  /* Disable Padding */
+
+#endif
index cf86090..2faffbd 100644 (file)
 
 #include "e1000.h"
 
-#define ID_LED_RESERVED_F746 0xF746
-#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
-                             (ID_LED_OFF1_ON2  <<  8) | \
-                             (ID_LED_DEF1_DEF2 <<  4) | \
-                             (ID_LED_DEF1_DEF2))
-
-#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
-#define AN_RETRY_COUNT          5 /* Autoneg Retry Count value */
-#define E1000_BASE1000T_STATUS          10
-#define E1000_IDLE_ERROR_COUNT_MASK     0xFF
-#define E1000_RECEIVE_ERROR_COUNTER     21
-#define E1000_RECEIVE_ERROR_MAX         0xFFFF
-
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
 static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
 static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
 static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -1549,7 +1534,6 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
        rxcw = er32(RXCW);
 
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
-
                /* Receiver is synchronized with no invalid bits.  */
                switch (mac->serdes_link_state) {
                case e1000_serdes_link_autoneg_complete:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
new file mode 100644 (file)
index 0000000..85cb1a3
--- /dev/null
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_82571_H_
+#define _E1000E_82571_H_
+
+#define ID_LED_RESERVED_F746   0xF746
+#define ID_LED_DEFAULT_82573   ((ID_LED_DEF1_DEF2 << 12) | \
+                                (ID_LED_OFF1_ON2  <<  8) | \
+                                (ID_LED_DEF1_DEF2 <<  4) | \
+                                (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX        0x08000000
+#define AN_RETRY_COUNT         5       /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n)   (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574       0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574  0x01F00000
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM      0x6000
+
+#define E1000_BASE1000T_STATUS         10
+#define E1000_IDLE_ERROR_COUNT_MASK    0xFF
+#define E1000_RECEIVE_ERROR_COUNTER    21
+#define E1000_RECEIVE_ERROR_MAX                0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
index 615b900..fc3a4fe 100644 (file)
 #define NVM_ALT_MAC_ADDR_PTR       0x0037
 #define NVM_CHECKSUM_REG           0x003F
 
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
 #define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
 #define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
 
 /* BME1000 PHY Specific Control Register */
 #define BME1000_PSCR_ENABLE_DOWNSHIFT   0x0800 /* 1 = enable downshift */
 
-/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL                                PHY_REG(772, 20)
-#define I82579_LPI_CTRL_100_ENABLE             0x2000
-#define I82579_LPI_CTRL_1000_ENABLE            0x4000
-#define I82579_LPI_CTRL_ENABLE_MASK            0x6000
-#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT   0x80
-
-/* Extended Management Interface (EMI) Registers */
-#define I82579_EMI_ADDR                0x10
-#define I82579_EMI_DATA                0x11
-#define I82579_LPI_UPDATE_TIMER        0x4805  /* in 40ns units + 40 ns base value */
-#define I82579_MSE_THRESHOLD   0x084F  /* 82579 Mean Square Error Threshold */
-#define I82577_MSE_THRESHOLD   0x0887  /* 82577 Mean Square Error Threshold */
-#define I82579_MSE_LINK_DOWN   0x2411  /* MSE count before dropping link */
-#define I82579_EEE_PCS_STATUS  0x182D  /* IEEE MMD Register 3.1 >> 8 */
-#define I82579_EEE_CAPABILITY  0x0410  /* IEEE MMD Register 3.20 */
-#define I82579_EEE_ADVERTISEMENT       0x040E  /* IEEE MMD Register 7.60 */
-#define I82579_EEE_LP_ABILITY          0x040F  /* IEEE MMD Register 7.61 */
-#define I82579_EEE_100_SUPPORTED       (1 << 1) /* 100BaseTx EEE supported */
-#define I82579_EEE_1000_SUPPORTED      (1 << 2) /* 1000BaseTx EEE supported */
-#define I217_EEE_PCS_STATUS    0x9401  /* IEEE MMD Register 3.1 */
-#define I217_EEE_CAPABILITY    0x8000  /* IEEE MMD Register 3.20 */
-#define I217_EEE_ADVERTISEMENT 0x8001  /* IEEE MMD Register 7.60 */
-#define I217_EEE_LP_ABILITY    0x8002  /* IEEE MMD Register 7.61 */
-
-#define E1000_EEE_RX_LPI_RCVD  0x0400  /* Tx LP idle received */
-#define E1000_EEE_TX_LPI_RCVD  0x0800  /* Rx LP idle received */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
-                           ((reg) & MAX_PHY_REG_ADDRESS))
-
 /* Bits...
  * 15-5: page
  * 4-0: register offset
 /* SerDes Control */
 #define E1000_GEN_POLL_TIMEOUT          640
 
-/* FW Semaphore */
-#define E1000_FWSM_WLOCK_MAC_MASK      0x0380
-#define E1000_FWSM_WLOCK_MAC_SHIFT     7
-
 #endif /* _E1000_DEFINES_H_ */
index 4b0bd9c..fcc7581 100644 (file)
@@ -95,70 +95,6 @@ struct e1000_info;
 
 #define DEFAULT_JUMBO                  9234
 
-/* BM/HV Specific Registers */
-#define BM_PORT_CTRL_PAGE                 769
-
-#define PHY_UPPER_SHIFT                   21
-#define BM_PHY_REG(page, reg) \
-       (((reg) & MAX_PHY_REG_ADDRESS) |\
-        (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
-        (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
-
-/* PHY Wakeup Registers and defines */
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
-#define BM_RCTL         PHY_REG(BM_WUC_PAGE, 0)
-#define BM_WUC          PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC         PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS          PHY_REG(BM_WUC_PAGE, 3)
-#define BM_RAR_L(_i)    (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
-#define BM_RAR_M(_i)    (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
-#define BM_RAR_H(_i)    (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
-#define BM_MTA(_i)      (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
-
-#define BM_RCTL_UPE           0x0001          /* Unicast Promiscuous Mode */
-#define BM_RCTL_MPE           0x0002          /* Multicast Promiscuous Mode */
-#define BM_RCTL_MO_SHIFT      3               /* Multicast Offset Shift */
-#define BM_RCTL_MO_MASK       (3 << 3)        /* Multicast Offset Mask */
-#define BM_RCTL_BAM           0x0020          /* Broadcast Accept Mode */
-#define BM_RCTL_PMCF          0x0040          /* Pass MAC Control Frames */
-#define BM_RCTL_RFCE          0x0080          /* Rx Flow Control Enable */
-
-#define HV_STATS_PAGE  778
-#define HV_SCC_UPPER   PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
-#define HV_SCC_LOWER   PHY_REG(HV_STATS_PAGE, 17)
-#define HV_ECOL_UPPER  PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
-#define HV_ECOL_LOWER  PHY_REG(HV_STATS_PAGE, 19)
-#define HV_MCC_UPPER   PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
-#define HV_MCC_LOWER   PHY_REG(HV_STATS_PAGE, 21)
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
-#define HV_COLC_UPPER  PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
-#define HV_COLC_LOWER  PHY_REG(HV_STATS_PAGE, 26)
-#define HV_DC_UPPER    PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
-#define HV_DC_LOWER    PHY_REG(HV_STATS_PAGE, 28)
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
-
-#define E1000_FCRTV_PCH     0x05F40 /* PCH Flow Control Refresh Timer Value */
-
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS                      17
-#define BM_CS_STATUS_LINK_UP              0x0400
-#define BM_CS_STATUS_RESOLVED             0x0800
-#define BM_CS_STATUS_SPEED_MASK           0xC000
-#define BM_CS_STATUS_SPEED_1000           0x8000
-
-/* 82577 Mobile Phy Status Register */
-#define HV_M_STATUS                       26
-#define HV_M_STATUS_AUTONEG_COMPLETE      0x1000
-#define HV_M_STATUS_SPEED_MASK            0x0300
-#define HV_M_STATUS_SPEED_1000            0x0200
-#define HV_M_STATUS_LINK_UP               0x0040
-
-#define E1000_ICH_FWSM_PCIM2PCI                0x01000000 /* ME PCIm-to-PCI active */
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT  2000
-
 /* Time to wait before putting the device into D3 if there's no link (in ms). */
 #define LINK_TIMEOUT           100
 
@@ -574,137 +510,6 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_es2_info;
 
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
-                                        u32 pba_num_size);
-
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
-
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
-
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
-                                                bool state);
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
-
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
-                                              u8 *mc_addr_list,
-                                              u32 mc_addr_count);
-extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-extern void e1000e_reset_adaptive(struct e1000_hw *hw);
-extern void e1000e_update_adaptive(struct e1000_hw *hw);
-
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
-                                          u16 *data);
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
-                                           u16 data);
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
-                                                u16 *phy_reg);
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
-                                                 u16 *phy_reg);
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
-                                        u16 data);
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
-                                       u16 *data);
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
-                              u32 usec_interval, bool *success);
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_check_downshift(struct e1000_hw *hw);
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
-                                        u16 *data);
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
-                                     u16 *data);
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
-                                         u16 data);
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
-                                      u16 data);
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-extern bool e1000_check_phy_82574(struct e1000_hw *hw);
-extern s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
 extern void e1000e_ptp_init(struct e1000_adapter *adapter);
 extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
 
@@ -733,15 +538,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
        return hw->phy.ops.write_reg_locked(hw, offset, data);
 }
 
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
-extern void e1000e_release_nvm(struct e1000_hw *hw);
 extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
 
 static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 {
@@ -776,10 +573,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
        return hw->phy.ops.get_info(hw);
 }
 
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-
 static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
 {
        return readl(hw->hw_addr + reg);
index c6c3e92..2c18137 100644 (file)
@@ -130,7 +130,6 @@ static int e1000_get_settings(struct net_device *netdev,
        u32 speed;
 
        if (hw->phy.media_type == e1000_media_type_copper) {
-
                ecmd->supported = (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
                                   SUPPORTED_100baseT_Half |
@@ -328,12 +327,12 @@ static int e1000_set_settings(struct net_device *netdev,
        }
 
        /* reset the link */
-
        if (netif_running(adapter->netdev)) {
                e1000e_down(adapter);
                e1000e_up(adapter);
-       } else
+       } else {
                e1000e_reset(adapter);
+       }
 
        clear_bit(__E1000_RESETTING, &adapter->state);
        return 0;
@@ -1355,7 +1354,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
                e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
                e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
                /* Enable loopback on the PHY */
-#define I82577_PHY_LBK_CTRL          19
                e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
                break;
        default:
index f32b19a..1e6b889 100644 (file)
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
 
+#include "regs.h"
 #include "defines.h"
 
 struct e1000_hw;
 
-enum e1e_registers {
-       E1000_CTRL     = 0x00000, /* Device Control - RW */
-       E1000_STATUS   = 0x00008, /* Device Status - RO */
-       E1000_EECD     = 0x00010, /* EEPROM/Flash Control - RW */
-       E1000_EERD     = 0x00014, /* EEPROM Read - RW */
-       E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
-       E1000_FLA      = 0x0001C, /* Flash Access - RW */
-       E1000_MDIC     = 0x00020, /* MDI Control - RW */
-       E1000_SCTL     = 0x00024, /* SerDes Control - RW */
-       E1000_FCAL     = 0x00028, /* Flow Control Address Low - RW */
-       E1000_FCAH     = 0x0002C, /* Flow Control Address High -RW */
-       E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
-       E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
-       E1000_FCT      = 0x00030, /* Flow Control Type - RW */
-       E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
-       E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
-       E1000_ICR      = 0x000C0, /* Interrupt Cause Read - R/clr */
-       E1000_ITR      = 0x000C4, /* Interrupt Throttling Rate - RW */
-       E1000_ICS      = 0x000C8, /* Interrupt Cause Set - WO */
-       E1000_IMS      = 0x000D0, /* Interrupt Mask Set - RW */
-       E1000_IMC      = 0x000D8, /* Interrupt Mask Clear - WO */
-       E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
-       E1000_IAM      = 0x000E0, /* Interrupt Acknowledge Auto Mask */
-       E1000_IVAR     = 0x000E4, /* Interrupt Vector Allocation - RW */
-       E1000_FEXTNVM7  = 0x000E4, /* Future Extended NVM 7 - RW */
-       E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
-#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
-       E1000_LPIC     = 0x000FC, /* Low Power Idle Control - RW */
-       E1000_RCTL     = 0x00100, /* Rx Control - RW */
-       E1000_FCTTV    = 0x00170, /* Flow Control Transmit Timer Value - RW */
-       E1000_TXCW     = 0x00178, /* Tx Configuration Word - RW */
-       E1000_RXCW     = 0x00180, /* Rx Configuration Word - RO */
-       E1000_TCTL     = 0x00400, /* Tx Control - RW */
-       E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
-       E1000_TIPG     = 0x00410, /* Tx Inter-packet gap -RW */
-       E1000_AIT      = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
-       E1000_LEDCTL   = 0x00E00, /* LED Control - RW */
-       E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
-       E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
-       E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
-#define E1000_POEMB    E1000_PHY_CTRL  /* PHY OEM Bits */
-       E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
-       E1000_PBS      = 0x01008, /* Packet Buffer Size */
-       E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
-       E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
-       E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
-       E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
-       E1000_PBA_ECC  = 0x01100, /* PBA ECC Register */
-       E1000_ERT      = 0x02008, /* Early Rx Threshold - RW */
-       E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
-       E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
-       E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
-       E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
-#define E1000_RDBAL(_n)        (E1000_RDBAL_BASE + (_n << 8))
-       E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
-#define E1000_RDBAH(_n)        (E1000_RDBAH_BASE + (_n << 8))
-       E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
-#define E1000_RDLEN(_n)        (E1000_RDLEN_BASE + (_n << 8))
-       E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
-#define E1000_RDH(_n)  (E1000_RDH_BASE + (_n << 8))
-       E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
-#define E1000_RDT(_n)  (E1000_RDT_BASE + (_n << 8))
-       E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
-       E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
-#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
-       E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
-
-       E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
-       E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
-#define E1000_TDBAL(_n)        (E1000_TDBAL_BASE + (_n << 8))
-       E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
-#define E1000_TDBAH(_n)        (E1000_TDBAH_BASE + (_n << 8))
-       E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
-#define E1000_TDLEN(_n)        (E1000_TDLEN_BASE + (_n << 8))
-       E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
-#define E1000_TDH(_n)  (E1000_TDH_BASE + (_n << 8))
-       E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
-#define E1000_TDT(_n)  (E1000_TDT_BASE + (_n << 8))
-       E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
-       E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
-#define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
-       E1000_TADV     = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
-       E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
-#define E1000_TARC(_n)   (E1000_TARC_BASE + (_n << 8))
-       E1000_CRCERRS  = 0x04000, /* CRC Error Count - R/clr */
-       E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
-       E1000_SYMERRS  = 0x04008, /* Symbol Error Count - R/clr */
-       E1000_RXERRC   = 0x0400C, /* Receive Error Count - R/clr */
-       E1000_MPC      = 0x04010, /* Missed Packet Count - R/clr */
-       E1000_SCC      = 0x04014, /* Single Collision Count - R/clr */
-       E1000_ECOL     = 0x04018, /* Excessive Collision Count - R/clr */
-       E1000_MCC      = 0x0401C, /* Multiple Collision Count - R/clr */
-       E1000_LATECOL  = 0x04020, /* Late Collision Count - R/clr */
-       E1000_COLC     = 0x04028, /* Collision Count - R/clr */
-       E1000_DC       = 0x04030, /* Defer Count - R/clr */
-       E1000_TNCRS    = 0x04034, /* Tx-No CRS - R/clr */
-       E1000_SEC      = 0x04038, /* Sequence Error Count - R/clr */
-       E1000_CEXTERR  = 0x0403C, /* Carrier Extension Error Count - R/clr */
-       E1000_RLEC     = 0x04040, /* Receive Length Error Count - R/clr */
-       E1000_XONRXC   = 0x04048, /* XON Rx Count - R/clr */
-       E1000_XONTXC   = 0x0404C, /* XON Tx Count - R/clr */
-       E1000_XOFFRXC  = 0x04050, /* XOFF Rx Count - R/clr */
-       E1000_XOFFTXC  = 0x04054, /* XOFF Tx Count - R/clr */
-       E1000_FCRUC    = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
-       E1000_PRC64    = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
-       E1000_PRC127   = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
-       E1000_PRC255   = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
-       E1000_PRC511   = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
-       E1000_PRC1023  = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
-       E1000_PRC1522  = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
-       E1000_GPRC     = 0x04074, /* Good Packets Rx Count - R/clr */
-       E1000_BPRC     = 0x04078, /* Broadcast Packets Rx Count - R/clr */
-       E1000_MPRC     = 0x0407C, /* Multicast Packets Rx Count - R/clr */
-       E1000_GPTC     = 0x04080, /* Good Packets Tx Count - R/clr */
-       E1000_GORCL    = 0x04088, /* Good Octets Rx Count Low - R/clr */
-       E1000_GORCH    = 0x0408C, /* Good Octets Rx Count High - R/clr */
-       E1000_GOTCL    = 0x04090, /* Good Octets Tx Count Low - R/clr */
-       E1000_GOTCH    = 0x04094, /* Good Octets Tx Count High - R/clr */
-       E1000_RNBC     = 0x040A0, /* Rx No Buffers Count - R/clr */
-       E1000_RUC      = 0x040A4, /* Rx Undersize Count - R/clr */
-       E1000_RFC      = 0x040A8, /* Rx Fragment Count - R/clr */
-       E1000_ROC      = 0x040AC, /* Rx Oversize Count - R/clr */
-       E1000_RJC      = 0x040B0, /* Rx Jabber Count - R/clr */
-       E1000_MGTPRC   = 0x040B4, /* Management Packets Rx Count - R/clr */
-       E1000_MGTPDC   = 0x040B8, /* Management Packets Dropped Count - R/clr */
-       E1000_MGTPTC   = 0x040BC, /* Management Packets Tx Count - R/clr */
-       E1000_TORL     = 0x040C0, /* Total Octets Rx Low - R/clr */
-       E1000_TORH     = 0x040C4, /* Total Octets Rx High - R/clr */
-       E1000_TOTL     = 0x040C8, /* Total Octets Tx Low - R/clr */
-       E1000_TOTH     = 0x040CC, /* Total Octets Tx High - R/clr */
-       E1000_TPR      = 0x040D0, /* Total Packets Rx - R/clr */
-       E1000_TPT      = 0x040D4, /* Total Packets Tx - R/clr */
-       E1000_PTC64    = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
-       E1000_PTC127   = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
-       E1000_PTC255   = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
-       E1000_PTC511   = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
-       E1000_PTC1023  = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
-       E1000_PTC1522  = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
-       E1000_MPTC     = 0x040F0, /* Multicast Packets Tx Count - R/clr */
-       E1000_BPTC     = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
-       E1000_TSCTC    = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
-       E1000_TSCTFC   = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
-       E1000_IAC      = 0x04100, /* Interrupt Assertion Count */
-       E1000_ICRXPTC  = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
-       E1000_ICRXATC  = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
-       E1000_ICTXPTC  = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
-       E1000_ICTXATC  = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
-       E1000_ICTXQEC  = 0x04118, /* Irq Cause Tx Queue Empty Count */
-       E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
-       E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
-       E1000_ICRXOC   = 0x04124, /* Irq Cause Receiver Overrun Count */
-       E1000_PCS_LCTL = 0x04208, /* PCS Link Control - RW */
-       E1000_PCS_LSTAT = 0x0420C, /* PCS Link Status - RO */
-       E1000_PCS_ANADV = 0x04218, /* AN advertisement - RW */
-       E1000_PCS_LPAB = 0x0421C, /* Link Partner Ability - RW */
-       E1000_RXCSUM   = 0x05000, /* Rx Checksum Control - RW */
-       E1000_RFCTL    = 0x05008, /* Receive Filter Control */
-       E1000_MTA      = 0x05200, /* Multicast Table Array - RW Array */
-       E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
-#define E1000_RAL(_n)   (E1000_RAL_BASE + ((_n) * 8))
-#define E1000_RA        (E1000_RAL(0))
-       E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
-#define E1000_RAH(_n)   (E1000_RAH_BASE + ((_n) * 8))
-       E1000_SHRAL_PCH_LPT_BASE = 0x05408,
-#define E1000_SHRAL_PCH_LPT(_n)   (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
-       E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
-#define E1000_SHRAH_PCH_LPT(_n)   (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
-       E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
-#define E1000_SHRAL(_n)   (E1000_SHRAL_BASE + ((_n) * 8))
-       E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
-#define E1000_SHRAH(_n)   (E1000_SHRAH_BASE + ((_n) * 8))
-       E1000_VFTA     = 0x05600, /* VLAN Filter Table Array - RW Array */
-       E1000_WUC      = 0x05800, /* Wakeup Control - RW */
-       E1000_WUFC     = 0x05808, /* Wakeup Filter Control - RW */
-       E1000_WUS      = 0x05810, /* Wakeup Status - RO */
-       E1000_MRQC     = 0x05818, /* Multiple Receive Control - RW */
-       E1000_MANC     = 0x05820, /* Management Control - RW */
-       E1000_FFLT     = 0x05F00, /* Flexible Filter Length Table - RW Array */
-       E1000_HOST_IF  = 0x08800, /* Host Interface */
-
-       E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
-       E1000_MANC2H    = 0x05860, /* Management Control To Host - RW */
-       E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
-#define E1000_MDEF(_n)   (E1000_MDEF_BASE + ((_n) * 4))
-       E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
-       E1000_GCR       = 0x05B00, /* PCI-Ex Control */
-       E1000_GCR2      = 0x05B64, /* PCI-Ex Control #2 */
-       E1000_FACTPS    = 0x05B30, /* Function Active and Power State to MNG */
-       E1000_SWSM      = 0x05B50, /* SW Semaphore */
-       E1000_FWSM      = 0x05B54, /* FW Semaphore */
-       E1000_SWSM2     = 0x05B58, /* Driver-only SW semaphore */
-       E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
-#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
-       E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
-#define E1000_RSSRK(_n)        (E1000_RSSRK_BASE + ((_n) * 4))
-       E1000_FFLT_DBG  = 0x05F04, /* Debug Register */
-       E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
-#define E1000_PCH_RAICC(_n)    (E1000_PCH_RAICC_BASE + ((_n) * 4))
-#define E1000_CRC_OFFSET       E1000_PCH_RAICC_BASE
-       E1000_HICR      = 0x08F00, /* Host Interface Control */
-       E1000_SYSTIML   = 0x0B600, /* System time register Low - RO */
-       E1000_SYSTIMH   = 0x0B604, /* System time register High - RO */
-       E1000_TIMINCA   = 0x0B608, /* Increment attributes register - RW */
-       E1000_TSYNCTXCTL = 0x0B614, /* Tx Time Sync Control register - RW */
-       E1000_TXSTMPL   = 0x0B618, /* Tx timestamp value Low - RO */
-       E1000_TXSTMPH   = 0x0B61C, /* Tx timestamp value High - RO */
-       E1000_TSYNCRXCTL = 0x0B620, /* Rx Time Sync Control register - RW */
-       E1000_RXSTMPL   = 0x0B624, /* Rx timestamp Low - RO */
-       E1000_RXSTMPH   = 0x0B628, /* Rx timestamp High - RO */
-       E1000_RXMTRL    = 0x0B634, /* Timesync Rx EtherType and Msg Type - RW */
-       E1000_RXUDP     = 0x0B638, /* Timesync Rx UDP Port - RW */
-};
-
-#define E1000_MAX_PHY_ADDR             4
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG     0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS     0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL       0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH     0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT      0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT     0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT             22   /* Page Select for BM */
-#define IGP_PAGE_SHIFT                 5
-#define PHY_REG_MASK                   0x1F
-
-#define BM_WUC_PAGE                    800
-#define BM_WUC_ADDRESS_OPCODE          0x11
-#define BM_WUC_DATA_OPCODE             0x12
-#define BM_WUC_ENABLE_PAGE             769
-#define BM_WUC_ENABLE_REG              17
-#define BM_WUC_ENABLE_BIT              (1 << 2)
-#define BM_WUC_HOST_WU_BIT             (1 << 4)
-#define BM_WUC_ME_WU_BIT               (1 << 5)
-
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-
-#define IGP01E1000_PHY_PCS_INIT_REG    0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK   0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX      0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED   0x0080
-
-#define IGP02E1000_PM_SPD              0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU          0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU          0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE   0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
-#define IGP01E1000_PSSR_MDIX                   0x0800
-#define IGP01E1000_PSSR_SPEED_MASK             0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS         0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM             4
-#define IGP02E1000_PHY_AGC_A                   0x11B1
-#define IGP02E1000_PHY_AGC_B                   0x12B1
-#define IGP02E1000_PHY_AGC_C                   0x14B1
-#define IGP02E1000_PHY_AGC_D                   0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT    9 /* Course - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK     0x7F
-#define IGP02E1000_AGC_RANGE           15
-
-/* manage.c */
-#define E1000_VFTA_ENTRY_SHIFT         5
-#define E1000_VFTA_ENTRY_MASK          0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK        0x1F
-
-#define E1000_HICR_EN                  0x01  /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C                   0x02
-#define E1000_HICR_FW_RESET_ENABLE     0x40
-#define E1000_HICR_FW_RESET            0x80
-
-#define E1000_FWSM_MODE_MASK           0xE
-#define E1000_FWSM_MODE_SHIFT          1
-
-#define E1000_MNG_IAMT_MODE            0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH   0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET   0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD  64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING   0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN      0x2
-
-/* nvm.c */
-#define E1000_STM_OPCODE  0xDB00
-
-#define E1000_KMRNCTRLSTA_OFFSET       0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
-#define E1000_KMRNCTRLSTA_REN          0x00200000
-#define E1000_KMRNCTRLSTA_CTRL_OFFSET  0x1    /* Kumeran Control */
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET  0x3    /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS     0x4    /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9    /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE        0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK  0x1000 /* Nearend Loopback mode */
-#define E1000_KMRNCTRLSTA_K1_CONFIG    0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE    0x0002
-#define E1000_KMRNCTRLSTA_HD_CTRL      0x10   /* Kumeran HD Control */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL        0x10
-#define IFE_PHY_SPECIAL_CONTROL                0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED    0x1B /* PHY Special and LED Control */
-#define IFE_PHY_MDIX_CONTROL           0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED     0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE          0x0010
-#define IFE_PSC_FORCE_POLARITY                 0x0020
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE            0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF                0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS    0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX     0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX      0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
-
-#define E1000_CABLE_LENGTH_UNDEFINED   0xFF
-
 #define E1000_DEV_ID_82571EB_COPPER            0x105E
 #define E1000_DEV_ID_82571EB_FIBER             0x105F
 #define E1000_DEV_ID_82571EB_SERDES            0x1060
@@ -776,6 +440,11 @@ struct e1000_host_mng_command_info {
        u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
 };
 
+#include "mac.h"
+#include "phy.h"
+#include "nvm.h"
+#include "manage.h"
+
 /* Function pointers for the MAC. */
 struct e1000_mac_operations {
        s32  (*id_led_init)(struct e1000_hw *);
@@ -1005,4 +674,8 @@ struct e1000_hw {
        } dev_spec;
 };
 
+#include "82571.h"
+#include "80003es2lan.h"
+#include "ich8lan.h"
+
 #endif
index a019b46..dff7bff 100644 (file)
 
 #include "e1000.h"
 
-#define ICH_FLASH_GFPREG               0x0000
-#define ICH_FLASH_HSFSTS               0x0004
-#define ICH_FLASH_HSFCTL               0x0006
-#define ICH_FLASH_FADDR                        0x0008
-#define ICH_FLASH_FDATA0               0x0010
-#define ICH_FLASH_PR0                  0x0074
-
-#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
-#define ICH_FLASH_WRITE_COMMAND_TIMEOUT        500
-#define ICH_FLASH_ERASE_COMMAND_TIMEOUT        3000000
-#define ICH_FLASH_LINEAR_ADDR_MASK     0x00FFFFFF
-#define ICH_FLASH_CYCLE_REPEAT_COUNT   10
-
-#define ICH_CYCLE_READ                 0
-#define ICH_CYCLE_WRITE                        2
-#define ICH_CYCLE_ERASE                        3
-
-#define FLASH_GFPREG_BASE_MASK         0x1FFF
-#define FLASH_SECTOR_ADDR_SHIFT                12
-
-#define ICH_FLASH_SEG_SIZE_256         256
-#define ICH_FLASH_SEG_SIZE_4K          4096
-#define ICH_FLASH_SEG_SIZE_8K          8192
-#define ICH_FLASH_SEG_SIZE_64K         65536
-
-
-#define E1000_ICH_FWSM_RSPCIPHY        0x00000040 /* Reset PHY on PCI Reset */
-/* FW established a valid mode */
-#define E1000_ICH_FWSM_FW_VALID                0x00008000
-
-#define E1000_ICH_MNG_IAMT_MODE                0x2
-
-#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
-                                (ID_LED_DEF1_OFF2 <<  8) | \
-                                (ID_LED_DEF1_ON2  <<  4) | \
-                                (ID_LED_DEF1_DEF2))
-
-#define E1000_ICH_NVM_SIG_WORD         0x13
-#define E1000_ICH_NVM_SIG_MASK         0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK    0xC0
-#define E1000_ICH_NVM_SIG_VALUE         0x80
-
-#define E1000_ICH8_LAN_INIT_TIMEOUT    1500
-
-#define E1000_FEXTNVM_SW_CONFIG                1
-#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
-
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK    0x0C000000
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC  0x08000000
-
-#define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
-
-#define PCIE_ICH8_SNOOP_ALL            PCIE_NO_SNOOP_ALL
-
-#define E1000_ICH_RAR_ENTRIES          7
-#define E1000_PCH2_RAR_ENTRIES         5 /* RAR[0], SHRA[0-3] */
-#define E1000_PCH_LPT_RAR_ENTRIES      12 /* RAR[0], SHRA[0-10] */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
-                          ((reg) & MAX_PHY_REG_ADDRESS))
-#define IGP3_KMRN_DIAG  PHY_REG(770, 19) /* KMRN Diagnostic */
-#define IGP3_VR_CTRL    PHY_REG(776, 18) /* Voltage Regulator Control */
-
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS   0x0002
-#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
-#define IGP3_VR_CTRL_MODE_SHUTDOWN     0x0200
-
-#define HV_LED_CONFIG          PHY_REG(768, 30) /* LED Configuration */
-
-#define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
-
-/* SMBus Control Phy Register */
-#define CV_SMB_CTRL            PHY_REG(769, 23)
-#define CV_SMB_CTRL_FORCE_SMBUS        0x0001
-
-/* SMBus Address Phy Register */
-#define HV_SMB_ADDR            PHY_REG(768, 26)
-#define HV_SMB_ADDR_MASK       0x007F
-#define HV_SMB_ADDR_PEC_EN     0x0200
-#define HV_SMB_ADDR_VALID      0x0080
-#define HV_SMB_ADDR_FREQ_MASK           0x1100
-#define HV_SMB_ADDR_FREQ_LOW_SHIFT      8
-#define HV_SMB_ADDR_FREQ_HIGH_SHIFT     12
-
-/* PHY Power Management Control */
-#define HV_PM_CTRL             PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
-
-/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                 BM_PHY_REG(BM_WUC_PAGE, 70)
-#define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
-#define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_ENABLE_LPI_RESET    0x1000
-#define I217_CGFREG                     PHY_REG(772, 29)
-#define I217_CGFREG_ENABLE_MTA_RESET    0x0002
-#define I217_MEMPWR                     PHY_REG(772, 26)
-#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-
-/* Strapping Option Register - RO */
-#define E1000_STRAP                     0x0000C
-#define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
-#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
-#define E1000_STRAP_SMT_FREQ_MASK       0x00003000
-#define E1000_STRAP_SMT_FREQ_SHIFT      12
-
-/* OEM Bits Phy Register */
-#define HV_OEM_BITS            PHY_REG(768, 25)
-#define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
-#define HV_OEM_BITS_GBE_DIS    0x0040 /* Gigabit Disable */
-#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
-
-#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
-#define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
-
-/* KMRN Mode Control */
-#define HV_KMRN_MODE_CTRL      PHY_REG(769, 16)
-#define HV_KMRN_MDIO_SLOW      0x0400
-
-/* KMRN FIFO Control and Status */
-#define HV_KMRN_FIFO_CTRLSTA                  PHY_REG(770, 16)
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK    0x7000
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT   12
-
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -4117,7 +3991,6 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                 * The SMBus release must also be disabled on LCD reset.
                 */
                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-
                        /* Enable proxy to reset only on power good. */
                        e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
                        phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
new file mode 100644 (file)
index 0000000..b6d3174
--- /dev/null
@@ -0,0 +1,268 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_ICH8LAN_H_
+#define _E1000E_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG               0x0000
+#define ICH_FLASH_HSFSTS               0x0004
+#define ICH_FLASH_HSFCTL               0x0006
+#define ICH_FLASH_FADDR                        0x0008
+#define ICH_FLASH_FDATA0               0x0010
+#define ICH_FLASH_PR0                  0x0074
+
+/* Requires up to 10 seconds when MNG might be accessing part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT        10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT        10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK     0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT   10
+
+#define ICH_CYCLE_READ                 0
+#define ICH_CYCLE_WRITE                        2
+#define ICH_CYCLE_ERASE                        3
+
+#define FLASH_GFPREG_BASE_MASK         0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT                12
+
+#define ICH_FLASH_SEG_SIZE_256         256
+#define ICH_FLASH_SEG_SIZE_4K          4096
+#define ICH_FLASH_SEG_SIZE_8K          8192
+#define ICH_FLASH_SEG_SIZE_64K         65536
+
+#define E1000_ICH_FWSM_RSPCIPHY        0x00000040      /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID        0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI        0x01000000      /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT  2000
+
+#define E1000_ICH_MNG_IAMT_MODE                0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK      0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT     7
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i)                (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i)                (0x0540C + ((_i) * 8))
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+                                (ID_LED_OFF1_OFF2 <<  8) | \
+                                (ID_LED_OFF1_ON2  <<  4) | \
+                                (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD         0x13
+#define E1000_ICH_NVM_SIG_MASK         0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK   0xC0
+#define E1000_ICH_NVM_SIG_VALUE                0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT    1500
+
+#define E1000_FEXTNVM_SW_CONFIG                1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M  (1 << 27)       /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK    0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC  0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
+
+#define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES  7
+#define E1000_PCH2_RAR_ENTRIES 5       /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES      12      /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT         5
+#define PHY_REG(page, reg)     (((page) << PHY_PAGE_SHIFT) | \
+                                ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19)        /* KMRN Diagnostic */
+#define IGP3_VR_CTRL   PHY_REG(776, 18)        /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS           0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK   0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN             0x0200
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG                PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL                        PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC                 PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC                        PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS                 PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i)           (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i)           (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i)           (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i)                (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i)             (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE            0x0001  /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE            0x0002  /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT       3       /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK                (3 << 3)        /* Multicast Offset Mask */
+#define BM_RCTL_BAM            0x0020  /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF           0x0040  /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE           0x0080  /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG          PHY_REG(768, 30)        /* LED Configuration */
+#define HV_MUX_DATA_CTRL       PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC    0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED   0x0004
+#define HV_STATS_PAGE  778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER   PHY_REG(HV_STATS_PAGE, 16)      /* Single Collision */
+#define HV_SCC_LOWER   PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER  PHY_REG(HV_STATS_PAGE, 18)      /* Excessive Coll. */
+#define HV_ECOL_LOWER  PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER   PHY_REG(HV_STATS_PAGE, 20)      /* Multiple Collision */
+#define HV_MCC_LOWER   PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23)    /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER  PHY_REG(HV_STATS_PAGE, 25)      /* Collision */
+#define HV_COLC_LOWER  PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER    PHY_REG(HV_STATS_PAGE, 27)      /* Defer Count */
+#define HV_DC_LOWER    PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29)      /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH        0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG    0x1B    /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE    0x1     /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL            PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS        0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK       0x007F
+#define HV_SMB_ADDR_PEC_EN     0x0200
+#define HV_SMB_ADDR_VALID      0x0080
+#define HV_SMB_ADDR_FREQ_MASK          0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT     8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT    12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP                    0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT        17
+#define E1000_STRAP_SMT_FREQ_MASK      0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT     12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS            PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU       0x0004  /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS    0x0040  /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400  /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL      PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW      0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA                   PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK     0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT    12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL             PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+
+#define SW_FLAG_TIMEOUT                1000    /* SW Semaphore flag timeout in ms */
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL                                PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE             0x2000
+#define I82579_LPI_CTRL_1000_ENABLE            0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK            0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT   0x80
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR                0x10
+#define I82579_EMI_DATA                0x11
+#define I82579_LPI_UPDATE_TIMER        0x4805  /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD   0x084F  /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD   0x0887  /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN   0x2411  /* MSE count before dropping link */
+#define I82579_EEE_PCS_STATUS          0x182D  /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY          0x0410  /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT       0x040E  /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY          0x040F  /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED       (1 << 1)        /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED      (1 << 2)        /* 1000BaseTx EEE */
+#define I217_EEE_PCS_STATUS    0x9401  /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY    0x8000  /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001  /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY    0x8002  /* IEEE MMD Register 7.61 */
+
+#define E1000_EEE_RX_LPI_RCVD  0x0400  /* Tx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD  0x0800  /* Rx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL                BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE   0x0080
+#define I217_SxCTRL                    PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET   0x1000
+#define I217_CGFREG                    PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET   0x0002
+#define I217_MEMPWR                    PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE        0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n)    (0x05F50 + ((_n) * 4))
+
+/* Latency Tolerance Reporting */
+#define E1000_LTRV                     0x000F8
+#define E1000_LTRV_SCALE_MAX           5
+#define E1000_LTRV_SCALE_FACTOR                5
+#define E1000_LTRV_REQ_SHIFT           15
+#define E1000_LTRV_NOSNOOP_SHIFT       16
+#define E1000_LTRV_SEND                        (1 << 30)
+
+/* Proprietary Latency Tolerance Reporting PCI Capability */
+#define E1000_PCI_LTR_CAP_LPT          0xA8
+
+/* OBFF Control & Threshold Defines */
+#define E1000_SVCR_OFF_EN              0x00000001
+#define E1000_SVCR_OFF_MASKINT         0x00001000
+#define E1000_SVCR_OFF_TIMER_MASK      0xFFFF0000
+#define E1000_SVCR_OFF_TIMER_SHIFT     16
+#define E1000_SVT_OFF_HWM_MASK         0x0000001F
+
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+                                                 bool state);
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
new file mode 100644 (file)
index 0000000..a61fee4
--- /dev/null
@@ -0,0 +1,74 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MAC_H_
+#define _E1000E_MAC_H_
+
+s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+                                      u16 *duplex);
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
+                                            u16 *speed, u16 *duplex);
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000e_led_on_generic(struct e1000_hw *hw);
+s32 e1000e_led_off_generic(struct e1000_hw *hw);
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+s32 e1000e_setup_link_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000e_reset_adaptive(struct e1000_hw *hw);
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+void e1000e_update_adaptive(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+
+#endif
index 4dae0db..e4b0f1e 100644 (file)
 
 #include "e1000.h"
 
-enum e1000_mng_mode {
-       e1000_mng_mode_none = 0,
-       e1000_mng_mode_asf,
-       e1000_mng_mode_pt,
-       e1000_mng_mode_ipmi,
-       e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG             0x20000000
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE           0x544D4149
-
 /**
  *  e1000_calculate_checksum - Calculate checksum for buffer
  *  @buffer: pointer to EEPROM
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
new file mode 100644 (file)
index 0000000..326897c
--- /dev/null
@@ -0,0 +1,72 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MANAGE_H_
+#define _E1000E_MANAGE_H_
+
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+       e1000_mng_mode_none = 0,
+       e1000_mng_mode_asf,
+       e1000_mng_mode_pt,
+       e1000_mng_mode_ipmi,
+       e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG                     0x20000000
+
+#define E1000_FWSM_MODE_MASK                   0xE
+#define E1000_FWSM_MODE_SHIFT                  1
+
+#define E1000_MNG_IAMT_MODE                    0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH           0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET           0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT         10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD          64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING   0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN      0x2
+
+#define E1000_VFTA_ENTRY_SHIFT                 5
+#define E1000_VFTA_ENTRY_MASK                  0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK                0x1F
+
+#define E1000_HICR_EN                  0x01    /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C                   0x02
+#define E1000_HICR_SV                  0x04    /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE     0x40
+#define E1000_HICR_FW_RESET            0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE           0x544D4149
+
+#endif
index e0eb5dd..a177b8b 100644 (file)
@@ -86,20 +86,7 @@ struct e1000_reg_info {
        char *name;
 };
 
-#define E1000_RDFH     0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT     0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS    0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS    0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC    0x02430 /* Rx Data FIFO Packet Count - RW */
-
-#define E1000_TDFH     0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT     0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS    0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS    0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC    0x03430 /* Tx Data FIFO Packet Count - RW */
-
 static const struct e1000_reg_info e1000_reg_info_tbl[] = {
-
        /* General Registers */
        {E1000_CTRL, "CTRL"},
        {E1000_STATUS, "STATUS"},
@@ -2024,7 +2011,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
        ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
 
        /* Auto-Mask Other interrupts upon ICR read */
-#define E1000_EIAC_MASK_82574   0x01F00000
        ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
        ctrl_ext |= E1000_CTRL_EXT_EIAME;
        ew32(CTRL_EXT, ctrl_ext);
@@ -4844,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
                        e1000_print_link_info(adapter);
+
+                       /* check if SmartSpeed worked */
+                       e1000e_check_downshift(hw);
+                       if (phy->speed_downgraded)
+                               netdev_warn(netdev,
+                                           "Link Speed was downgraded by SmartSpeed\n");
+
                        /* On supported PHYs, check for duplex mismatch only
                         * if link has autonegotiated at 10/100 half
                         */
@@ -6407,7 +6400,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
         */
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_get_hw_control(adapter);
-
 }
 
 static void e1000_print_device_info(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
new file mode 100644 (file)
index 0000000..45fc695
--- /dev/null
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+                                 u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE       0xDB00
+
+#endif
index ae656f1..0930c13 100644 (file)
@@ -53,48 +53,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
 #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
                ARRAY_SIZE(e1000_igp_2_cable_length_table)
 
-#define BM_PHY_REG_PAGE(offset) \
-       ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
-#define BM_PHY_REG_NUM(offset) \
-       ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
-        (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
-               ~MAX_PHY_REG_ADDRESS)))
-
-#define HV_INTC_FC_PAGE_START             768
-#define I82578_ADDR_REG                   29
-#define I82577_ADDR_REG                   16
-#define I82577_CFG_REG                    22
-#define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
-#define I82577_CTRL_REG                   23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2            18
-#define I82577_PHY_STATUS_2          26
-#define I82577_PHY_DIAG_STATUS       31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
-#define I82577_PHY_STATUS2_MDIX           0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_MANUAL_MDIX      0x0200
-#define I82577_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
-#define I82577_PHY_CTRL2_MDIX_CFG_MASK    0x0600
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* BM PHY Copper Specific Control 1 */
-#define BM_CS_CTRL1                       16
-
-#define HV_MUX_DATA_CTRL               PHY_REG(776, 16)
-#define HV_MUX_DATA_CTRL_GEN_TO_MAC    0x0400
-#define HV_MUX_DATA_CTRL_FORCE_SPEED   0x0004
-
 /**
  *  e1000e_check_reset_block_generic - Check if PHY reset is blocked
  *  @hw: pointer to the HW structure
@@ -2516,7 +2474,6 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
        hw->phy.addr = 1;
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
-
                /* Page is shifted left, PHY expects (page x 32) */
                ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
                                                    page);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
new file mode 100644 (file)
index 0000000..f4f71b9
--- /dev/null
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_PHY_H_
+#define _E1000E_PHY_H_
+
+s32 e1000e_check_downshift(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000e_get_phy_id(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                               u32 usec_interval, bool *success);
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR             8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG     0x10    /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS     0x11    /* Status */
+#define IGP01E1000_PHY_PORT_CTRL       0x12    /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH     0x13    /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT      0x19    /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT     0x1F    /* Page Select */
+#define BM_PHY_PAGE_SELECT             22      /* Page Select for BM */
+#define IGP_PAGE_SHIFT                 5
+#define PHY_REG_MASK                   0x1F
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE              769
+#define BM_WUC_PAGE                    800
+#define BM_WUC_ADDRESS_OPCODE          0x11
+#define BM_WUC_DATA_OPCODE             0x12
+#define BM_WUC_ENABLE_PAGE             BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG              17
+#define BM_WUC_ENABLE_BIT              (1 << 2)
+#define BM_WUC_HOST_WU_BIT             (1 << 4)
+#define BM_WUC_ME_WU_BIT               (1 << 5)
+
+#define PHY_UPPER_SHIFT                        21
+#define BM_PHY_REG(page, reg) \
+       (((reg) & MAX_PHY_REG_ADDRESS) |\
+        (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+        (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+       ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+       ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+        (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+               ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START          768
+#define I82578_ADDR_REG                        29
+#define I82577_ADDR_REG                        16
+#define I82577_CFG_REG                 22
+#define I82577_CFG_ASSERT_CRS_ON_TX    (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT    (3 << 10)       /* auto downshift */
+#define I82577_CTRL_REG                        23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2              18
+#define I82577_PHY_LBK_CTRL            19
+#define I82577_PHY_STATUS_2            26
+#define I82577_PHY_DIAG_STATUS         31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY                0x0400
+#define I82577_PHY_STATUS2_MDIX                        0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK          0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS      0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX           0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX         0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK         0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH            0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT      2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1                    16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS                   17
+#define BM_CS_STATUS_LINK_UP           0x0400
+#define BM_CS_STATUS_RESOLVED          0x0800
+#define BM_CS_STATUS_SPEED_MASK                0xC000
+#define BM_CS_STATUS_SPEED_1000                0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS                    26
+#define HV_M_STATUS_AUTONEG_COMPLETE   0x1000
+#define HV_M_STATUS_SPEED_MASK         0x0300
+#define HV_M_STATUS_SPEED_1000         0x0200
+#define HV_M_STATUS_LINK_UP            0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG    0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK   0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX      0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000  /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED   0x0080
+
+#define IGP02E1000_PM_SPD              0x0001  /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU          0x0002  /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU          0x0004  /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE   0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
+#define IGP01E1000_PSSR_MDIX           0x0800
+#define IGP01E1000_PSSR_SPEED_MASK     0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM     4
+#define IGP02E1000_PHY_AGC_A           0x11B1
+#define IGP02E1000_PHY_AGC_B           0x12B1
+#define IGP02E1000_PHY_AGC_C           0x14B1
+#define IGP02E1000_PHY_AGC_D           0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT    9       /* Course=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK     0x7F
+#define IGP02E1000_AGC_RANGE           15
+
+#define E1000_CABLE_LENGTH_UNDEFINED   0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET       0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN          0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET  0x1     /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET  0x3     /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS     0x4     /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9     /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE        0x0200  /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK  0x1000  /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG    0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE    0x0002  /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL      0x10    /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL        0x10
+#define IFE_PHY_SPECIAL_CONTROL                0x11    /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED    0x1B    /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL           0x1C    /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED     0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE  0x0010
+#define IFE_PSC_FORCE_POLARITY         0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE            0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF                0x0006  /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON         0x0007  /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS            0x0020  /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX             0x0040  /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX              0x0080  /* 1=enable auto, 0=disable */
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
new file mode 100644 (file)
index 0000000..794fe14
--- /dev/null
@@ -0,0 +1,252 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_REGS_H_
+#define _E1000E_REGS_H_
+
+#define E1000_CTRL     0x00000 /* Device Control - RW */
+#define E1000_STATUS   0x00008 /* Device Status - RO */
+#define E1000_EECD     0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C /* Flash Access - RW */
+#define E1000_MDIC     0x00020 /* MDI Control - RW */
+#define E1000_SCTL     0x00024 /* SerDes Control - RW */
+#define E1000_FCAL     0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXTNVM  0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FCT      0x00030 /* Flow Control Type - RW */
+#define E1000_VET      0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR     0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR     0x000F0
+#define E1000_SVT      0x000F4
+#define E1000_LPIC     0x000FC /* Low Power IDLE control */
+#define E1000_RCTL     0x00100 /* Rx Control - RW */
+#define E1000_FCTTV    0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW     0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC  0x01100 /* PBA ECC Register */
+#define E1000_TCTL     0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG     0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_AIT      0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL      0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE      0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB    E1000_PHY_CTRL  /* PHY OEM Bits */
+#define E1000_PBA      0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEWR     0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP     0x0103C /* FLASH Opcode Register */
+#define E1000_ERT      0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH     0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT     0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS    0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS    0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC    0x02430 /* Rx Data FIFO Packet Count - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDTR     0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV     0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)        ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+                        (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)        ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+                        (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)        ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+                        (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n)  ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+                        (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)  ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+                        (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)       ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+                                (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)        ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+                        (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)        ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+                        (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)        ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+                        (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)  ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+                        (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)  ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+                        (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)       ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+                                (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n)         (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD          0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i)          (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                                (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i)          (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                                (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i)                (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i)                (0x0543C + ((_i) * 8))
+#define E1000_TDFH             0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT             0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS            0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS            0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC            0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV     0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV     0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS  0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028 /* Collision Count - R/clr */
+#define E1000_DC       0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC      0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC   0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC  0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC  0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC    0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068 /* Packets Rx (255-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC     0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC     0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC     0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL    0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH    0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL    0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH    0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC     0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC      0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC      0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC      0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC   0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL     0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH     0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL     0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH     0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR      0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT      0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64    0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC     0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC    0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC   0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC      0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC  0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC  0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC  0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC   0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET       0x05F50 /* CRC Offset register */
+
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT        0x0420C /* PCS Link Status - RO */
+#define E1000_PCS_ANADV        0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_RXCSUM   0x05000 /* Rx Checksum Control - RW */
+#define E1000_RFCTL    0x05008 /* Receive Filter Control */
+#define E1000_MTA      0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC      0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810 /* Wakeup Status - RO */
+#define E1000_MANC     0x05820 /* Management Control - RW */
+#define E1000_FFLT     0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800 /* Host Interface */
+
+#define E1000_KMRNCTRLSTA      0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H           0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n)         (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC       0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_GCR      0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2     0x05B64 /* PCI-Ex Control #2 */
+#define E1000_FACTPS   0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM     0x05B50 /* SW Semaphore */
+#define E1000_FWSM     0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2    0x05B58
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR     0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_MRQC     0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))  /* Redirection Table - RW */
+#define E1000_RSSRK(_i)        (0x05C80 + ((_i) * 4))  /* RSS Random Key - RW */
+#define E1000_TSYNCRXCTL       0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL       0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_RXSTMPL  0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH  0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL  0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH  0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML  0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH  0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA  0x0B608 /* Increment attributes register - RW */
+#define E1000_RXMTRL   0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP    0x0B638 /* Time Sync Rx UDP Port - RW */
+
+#endif
index 54a7c20..84e7e09 100644 (file)
@@ -111,184 +111,168 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
        return ext_mdio;
 }
 
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+/**
+ *  igb_init_phy_params_82575 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
 {
        struct e1000_phy_info *phy = &hw->phy;
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       struct e1000_mac_info *mac = &hw->mac;
-       struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
-       u32 eecd;
-       s32 ret_val;
-       u16 size;
-       u32 ctrl_ext = 0;
+       s32 ret_val = 0;
+       u32 ctrl_ext;
 
-       switch (hw->device_id) {
-       case E1000_DEV_ID_82575EB_COPPER:
-       case E1000_DEV_ID_82575EB_FIBER_SERDES:
-       case E1000_DEV_ID_82575GB_QUAD_COPPER:
-               mac->type = e1000_82575;
-               break;
-       case E1000_DEV_ID_82576:
-       case E1000_DEV_ID_82576_NS:
-       case E1000_DEV_ID_82576_NS_SERDES:
-       case E1000_DEV_ID_82576_FIBER:
-       case E1000_DEV_ID_82576_SERDES:
-       case E1000_DEV_ID_82576_QUAD_COPPER:
-       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
-       case E1000_DEV_ID_82576_SERDES_QUAD:
-               mac->type = e1000_82576;
-               break;
-       case E1000_DEV_ID_82580_COPPER:
-       case E1000_DEV_ID_82580_FIBER:
-       case E1000_DEV_ID_82580_QUAD_FIBER:
-       case E1000_DEV_ID_82580_SERDES:
-       case E1000_DEV_ID_82580_SGMII:
-       case E1000_DEV_ID_82580_COPPER_DUAL:
-       case E1000_DEV_ID_DH89XXCC_SGMII:
-       case E1000_DEV_ID_DH89XXCC_SERDES:
-       case E1000_DEV_ID_DH89XXCC_BACKPLANE:
-       case E1000_DEV_ID_DH89XXCC_SFP:
-               mac->type = e1000_82580;
-               break;
-       case E1000_DEV_ID_I350_COPPER:
-       case E1000_DEV_ID_I350_FIBER:
-       case E1000_DEV_ID_I350_SERDES:
-       case E1000_DEV_ID_I350_SGMII:
-               mac->type = e1000_i350;
-               break;
-       case E1000_DEV_ID_I210_COPPER:
-       case E1000_DEV_ID_I210_COPPER_OEM1:
-       case E1000_DEV_ID_I210_COPPER_IT:
-       case E1000_DEV_ID_I210_FIBER:
-       case E1000_DEV_ID_I210_SERDES:
-       case E1000_DEV_ID_I210_SGMII:
-               mac->type = e1000_i210;
-               break;
-       case E1000_DEV_ID_I211_COPPER:
-               mac->type = e1000_i211;
-               break;
-       default:
-               return -E1000_ERR_MAC_INIT;
-               break;
+       if (hw->phy.media_type != e1000_media_type_copper) {
+               phy->type = e1000_phy_none;
+               goto out;
        }
 
-       /* Set media type */
-       /*
-        * The 82575 uses bits 22:23 for link mode. The mode can be changed
-        * based on the EEPROM. We cannot rely upon device ID. There
-        * is no distinguishable difference between fiber and internal
-        * SerDes mode on the 82575. There can be an external PHY attached
-        * on the SGMII interface. For this, we'll set sgmii_active to true.
-        */
-       phy->media_type = e1000_media_type_copper;
-       dev_spec->sgmii_active = false;
+       phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+       phy->reset_delay_us     = 100;
 
        ctrl_ext = rd32(E1000_CTRL_EXT);
-       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
-       case E1000_CTRL_EXT_LINK_MODE_SGMII:
-               dev_spec->sgmii_active = true;
-               break;
-       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
-       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
-               hw->phy.media_type = e1000_media_type_internal_serdes;
-               break;
-       default:
-               break;
+
+       if (igb_sgmii_active_82575(hw)) {
+               phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+               ctrl_ext |= E1000_CTRL_I2C_ENA;
+       } else {
+               phy->ops.reset = igb_phy_hw_reset;
+               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
        }
 
-       /* Set mta register count */
-       mac->mta_reg_count = 128;
-       /* Set rar entry count */
-       switch (mac->type) {
-       case e1000_82576:
-               mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+       wr32(E1000_CTRL_EXT, ctrl_ext);
+       igb_reset_mdicnfg_82580(hw);
+
+       if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+               phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+               phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+       } else {
+               switch (hw->mac.type) {
+               case e1000_82580:
+               case e1000_i350:
+                       phy->ops.read_reg = igb_read_phy_reg_82580;
+                       phy->ops.write_reg = igb_write_phy_reg_82580;
+                       break;
+               case e1000_i210:
+               case e1000_i211:
+                       phy->ops.read_reg = igb_read_phy_reg_gs40g;
+                       phy->ops.write_reg = igb_write_phy_reg_gs40g;
+                       break;
+               default:
+                       phy->ops.read_reg = igb_read_phy_reg_igp;
+                       phy->ops.write_reg = igb_write_phy_reg_igp;
+               }
+       }
+
+       /* set lan id */
+       hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+                       E1000_STATUS_FUNC_SHIFT;
+
+       /* Set phy->phy_addr and phy->id. */
+       ret_val = igb_get_phy_id_82575(hw);
+       if (ret_val)
+               return ret_val;
+
+       /* Verify phy id and set remaining function pointers */
+       switch (phy->id) {
+       case I347AT4_E_PHY_ID:
+       case M88E1112_E_PHY_ID:
+       case M88E1111_I_PHY_ID:
+               phy->type               = e1000_phy_m88;
+               phy->ops.get_phy_info   = igb_get_phy_info_m88;
+               if (phy->id == I347AT4_E_PHY_ID ||
+                   phy->id == M88E1112_E_PHY_ID)
+                       phy->ops.get_cable_length =
+                                        igb_get_cable_length_m88_gen2;
+               else
+                       phy->ops.get_cable_length = igb_get_cable_length_m88;
+               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
                break;
-       case e1000_82580:
-               mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+       case IGP03E1000_E_PHY_ID:
+               phy->type = e1000_phy_igp_3;
+               phy->ops.get_phy_info = igb_get_phy_info_igp;
+               phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+               phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+               phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
                break;
-       case e1000_i350:
-               mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+       case I82580_I_PHY_ID:
+       case I350_I_PHY_ID:
+               phy->type = e1000_phy_82580;
+               phy->ops.force_speed_duplex =
+                                        igb_phy_force_speed_duplex_82580;
+               phy->ops.get_cable_length = igb_get_cable_length_82580;
+               phy->ops.get_phy_info = igb_get_phy_info_82580;
+               phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+               phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
                break;
-       default:
-               mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+       case I210_I_PHY_ID:
+               phy->type               = e1000_phy_i210;
+               phy->ops.check_polarity = igb_check_polarity_m88;
+               phy->ops.get_phy_info   = igb_get_phy_info_m88;
+               phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+               phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+               phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
                break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               goto out;
        }
-       /* reset */
-       if (mac->type >= e1000_82580)
-               mac->ops.reset_hw = igb_reset_hw_82580;
-       else
-               mac->ops.reset_hw = igb_reset_hw_82575;
 
-       if (mac->type >= e1000_i210) {
-               mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
-               mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
-       } else {
-               mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
-               mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
-       }
+out:
+       return ret_val;
+}
 
-       /* Set if part includes ASF firmware */
-       mac->asf_firmware_present = true;
-       /* Set if manageability features are enabled. */
-       mac->arc_subsystem_valid =
-               (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
-                       ? true : false;
-       /* enable EEE on i350 parts and later parts */
-       if (mac->type >= e1000_i350)
-               dev_spec->eee_disable = false;
-       else
-               dev_spec->eee_disable = true;
-       /* physical interface link setup */
-       mac->ops.setup_physical_interface =
-               (hw->phy.media_type == e1000_media_type_copper)
-                       ? igb_setup_copper_link_82575
-                       : igb_setup_serdes_link_82575;
+/**
+ *  igb_init_nvm_params_82575 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = rd32(E1000_EECD);
+       u16 size;
 
-       /* NVM initialization */
-       eecd = rd32(E1000_EECD);
        size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
                     E1000_EECD_SIZE_EX_SHIFT);
-
-       /*
-        * Added to a constant, "size" becomes the left-shift value
+       /* Added to a constant, "size" becomes the left-shift value
         * for setting word_size.
         */
        size += NVM_WORD_SIZE_BASE_SHIFT;
 
-       /*
-        * Check for invalid size
+       /* Just in case size is out of range, cap it to the largest
+        * EEPROM size supported
         */
-       if ((hw->mac.type == e1000_82576) && (size > 15)) {
-               pr_notice("The NVM size is not valid, defaulting to 32K\n");
+       if (size > 15)
                size = 15;
-       }
 
        nvm->word_size = 1 << size;
        if (hw->mac.type < e1000_i210) {
-               nvm->opcode_bits        = 8;
-               nvm->delay_usec         = 1;
+               nvm->opcode_bits = 8;
+               nvm->delay_usec = 1;
+
                switch (nvm->override) {
                case e1000_nvm_override_spi_large:
-                       nvm->page_size    = 32;
+                       nvm->page_size = 32;
                        nvm->address_bits = 16;
                        break;
                case e1000_nvm_override_spi_small:
-                       nvm->page_size    = 8;
+                       nvm->page_size = 8;
                        nvm->address_bits = 8;
                        break;
                default:
-                       nvm->page_size    = eecd
-                               & E1000_EECD_ADDR_BITS ? 32 : 8;
-                       nvm->address_bits = eecd
-                               & E1000_EECD_ADDR_BITS ? 16 : 8;
+                       nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+                       nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+                                           16 : 8;
                        break;
                }
                if (nvm->word_size == (1 << 15))
                        nvm->page_size = 128;
 
                nvm->type = e1000_nvm_eeprom_spi;
-       } else
+       } else {
                nvm->type = e1000_nvm_flash_hw;
+       }
 
        /* NVM Function Pointers */
        switch (hw->mac.type) {
@@ -345,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                break;
        }
 
-       /* if part supports SR-IOV then initialize mailbox parameters */
+       return 0;
+}
+
+/**
+ *  igb_init_mac_params_82575 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+       /* Set mta register count */
+       mac->mta_reg_count = 128;
+       /* Set rar entry count */
        switch (mac->type) {
        case e1000_82576:
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+               break;
+       case e1000_82580:
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+               break;
        case e1000_i350:
-               igb_init_mbx_params_pf(hw);
+               mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
                break;
        default:
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
                break;
        }
+       /* reset */
+       if (mac->type >= e1000_82580)
+               mac->ops.reset_hw = igb_reset_hw_82580;
+       else
+               mac->ops.reset_hw = igb_reset_hw_82575;
 
-       /* setup PHY parameters */
-       if (phy->media_type != e1000_media_type_copper) {
-               phy->type = e1000_phy_none;
-               return 0;
-       }
-
-       phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-       phy->reset_delay_us      = 100;
-
-       ctrl_ext = rd32(E1000_CTRL_EXT);
+       if (mac->type >= e1000_i210) {
+               mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+               mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
 
-       /* PHY function pointers */
-       if (igb_sgmii_active_82575(hw)) {
-               phy->ops.reset      = igb_phy_hw_reset_sgmii_82575;
-               ctrl_ext |= E1000_CTRL_I2C_ENA;
        } else {
-               phy->ops.reset      = igb_phy_hw_reset;
-               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+               mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+               mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
        }
 
-       wr32(E1000_CTRL_EXT, ctrl_ext);
-       igb_reset_mdicnfg_82580(hw);
-
-       if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
-               phy->ops.read_reg   = igb_read_phy_reg_sgmii_82575;
-               phy->ops.write_reg  = igb_write_phy_reg_sgmii_82575;
-       } else if ((hw->mac.type == e1000_82580)
-               || (hw->mac.type == e1000_i350)) {
-               phy->ops.read_reg   = igb_read_phy_reg_82580;
-               phy->ops.write_reg  = igb_write_phy_reg_82580;
-       } else if (hw->phy.type >= e1000_phy_i210) {
-               phy->ops.read_reg   = igb_read_phy_reg_gs40g;
-               phy->ops.write_reg  = igb_write_phy_reg_gs40g;
-       } else {
-               phy->ops.read_reg   = igb_read_phy_reg_igp;
-               phy->ops.write_reg  = igb_write_phy_reg_igp;
-       }
+       /* Set if part includes ASF firmware */
+       mac->asf_firmware_present = true;
+       /* Set if manageability features are enabled. */
+       mac->arc_subsystem_valid =
+               (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+                       ? true : false;
+       /* enable EEE on i350 parts and later parts */
+       if (mac->type >= e1000_i350)
+               dev_spec->eee_disable = false;
+       else
+               dev_spec->eee_disable = true;
+       /* physical interface link setup */
+       mac->ops.setup_physical_interface =
+               (hw->phy.media_type == e1000_media_type_copper)
+                       ? igb_setup_copper_link_82575
+                       : igb_setup_serdes_link_82575;
 
-       /* set lan id */
-       hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
-                      E1000_STATUS_FUNC_SHIFT;
+       return 0;
+}
 
-       /* Set phy->phy_addr and phy->id. */
-       ret_val = igb_get_phy_id_82575(hw);
-       if (ret_val)
-               return ret_val;
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+       s32 ret_val;
+       u32 ctrl_ext = 0;
 
-       /* Verify phy id and set remaining function pointers */
-       switch (phy->id) {
-       case I347AT4_E_PHY_ID:
-       case M88E1112_E_PHY_ID:
-       case M88E1111_I_PHY_ID:
-               phy->type                   = e1000_phy_m88;
-               phy->ops.get_phy_info       = igb_get_phy_info_m88;
+       switch (hw->device_id) {
+       case E1000_DEV_ID_82575EB_COPPER:
+       case E1000_DEV_ID_82575EB_FIBER_SERDES:
+       case E1000_DEV_ID_82575GB_QUAD_COPPER:
+               mac->type = e1000_82575;
+               break;
+       case E1000_DEV_ID_82576:
+       case E1000_DEV_ID_82576_NS:
+       case E1000_DEV_ID_82576_NS_SERDES:
+       case E1000_DEV_ID_82576_FIBER:
+       case E1000_DEV_ID_82576_SERDES:
+       case E1000_DEV_ID_82576_QUAD_COPPER:
+       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+       case E1000_DEV_ID_82576_SERDES_QUAD:
+               mac->type = e1000_82576;
+               break;
+       case E1000_DEV_ID_82580_COPPER:
+       case E1000_DEV_ID_82580_FIBER:
+       case E1000_DEV_ID_82580_QUAD_FIBER:
+       case E1000_DEV_ID_82580_SERDES:
+       case E1000_DEV_ID_82580_SGMII:
+       case E1000_DEV_ID_82580_COPPER_DUAL:
+       case E1000_DEV_ID_DH89XXCC_SGMII:
+       case E1000_DEV_ID_DH89XXCC_SERDES:
+       case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+       case E1000_DEV_ID_DH89XXCC_SFP:
+               mac->type = e1000_82580;
+               break;
+       case E1000_DEV_ID_I350_COPPER:
+       case E1000_DEV_ID_I350_FIBER:
+       case E1000_DEV_ID_I350_SERDES:
+       case E1000_DEV_ID_I350_SGMII:
+               mac->type = e1000_i350;
+               break;
+       case E1000_DEV_ID_I210_COPPER:
+       case E1000_DEV_ID_I210_COPPER_OEM1:
+       case E1000_DEV_ID_I210_COPPER_IT:
+       case E1000_DEV_ID_I210_FIBER:
+       case E1000_DEV_ID_I210_SERDES:
+       case E1000_DEV_ID_I210_SGMII:
+               mac->type = e1000_i210;
+               break;
+       case E1000_DEV_ID_I211_COPPER:
+               mac->type = e1000_i211;
+               break;
+       default:
+               return -E1000_ERR_MAC_INIT;
+               break;
+       }
 
-               if (phy->id == I347AT4_E_PHY_ID ||
-                   phy->id == M88E1112_E_PHY_ID)
-                       phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
-               else
-                       phy->ops.get_cable_length = igb_get_cable_length_m88;
+       /* Set media type */
+       /*
+        * The 82575 uses bits 22:23 for link mode. The mode can be changed
+        * based on the EEPROM. We cannot rely upon device ID. There
+        * is no distinguishable difference between fiber and internal
+        * SerDes mode on the 82575. There can be an external PHY attached
+        * on the SGMII interface. For this, we'll set sgmii_active to true.
+        */
+       hw->phy.media_type = e1000_media_type_copper;
+       dev_spec->sgmii_active = false;
 
-               if (phy->id == I210_I_PHY_ID) {
-                       phy->ops.get_cable_length =
-                                        igb_get_cable_length_m88_gen2;
-                       phy->ops.set_d0_lplu_state =
-                                       igb_set_d0_lplu_state_82580;
-                       phy->ops.set_d3_lplu_state =
-                                       igb_set_d3_lplu_state_82580;
-               }
-               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+       ctrl_ext = rd32(E1000_CTRL_EXT);
+       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+       case E1000_CTRL_EXT_LINK_MODE_SGMII:
+               dev_spec->sgmii_active = true;
                break;
-       case IGP03E1000_E_PHY_ID:
-               phy->type                   = e1000_phy_igp_3;
-               phy->ops.get_phy_info       = igb_get_phy_info_igp;
-               phy->ops.get_cable_length   = igb_get_cable_length_igp_2;
-               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
-               phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82575;
-               phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state;
+       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+               hw->phy.media_type = e1000_media_type_internal_serdes;
                break;
-       case I82580_I_PHY_ID:
-       case I350_I_PHY_ID:
-               phy->type                   = e1000_phy_82580;
-               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
-               phy->ops.get_cable_length   = igb_get_cable_length_82580;
-               phy->ops.get_phy_info       = igb_get_phy_info_82580;
-               phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82580;
-               phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state_82580;
+       default:
                break;
-       case I210_I_PHY_ID:
-               phy->type                   = e1000_phy_i210;
-               phy->ops.get_phy_info       = igb_get_phy_info_m88;
-               phy->ops.check_polarity     = igb_check_polarity_m88;
-               phy->ops.get_cable_length   = igb_get_cable_length_m88_gen2;
-               phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82580;
-               phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state_82580;
-               phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+       }
+
+       /* mac initialization and operations */
+       ret_val = igb_init_mac_params_82575(hw);
+       if (ret_val)
+               goto out;
+
+       /* NVM initialization */
+       ret_val = igb_init_nvm_params_82575(hw);
+       if (ret_val)
+               goto out;
+
+       /* if part supports SR-IOV then initialize mailbox parameters */
+       switch (mac->type) {
+       case e1000_82576:
+       case e1000_i350:
+               igb_init_mbx_params_pf(hw);
                break;
        default:
-               return -E1000_ERR_PHY;
+               break;
        }
 
-       return 0;
+       /* setup PHY parameters */
+       ret_val = igb_init_phy_params_82575(hw);
+
+out:
+       return ret_val;
 }
 
 /**
index 4b78053..d27edbc 100644 (file)
@@ -139,8 +139,6 @@ struct vf_data_storage {
 #define IGB_RX_HDR_LEN         IGB_RXBUFFER_256
 #define IGB_RX_BUFSZ           IGB_RXBUFFER_2048
 
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE      16
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IGB_RX_BUFFER_WRITE    16      /* Must be power of 2 */
 
@@ -169,6 +167,17 @@ enum igb_tx_flags {
 #define IGB_TX_FLAGS_VLAN_MASK         0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT        16
 
+/*
+ * The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR        15
+#define IGB_MAX_DATA_PER_TXD   (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct igb_tx_buffer {
@@ -275,10 +284,18 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+       IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
 };
 
+#define ring_uses_build_skb(ring) \
+       test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+       set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+       clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)          \
index 40b5d56..a3830a8 100644 (file)
@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
        } else {
                hw->mac.ops.check_for_link(&adapter->hw);
                if (hw->mac.autoneg)
-                       msleep(4000);
+                       msleep(5000);
 
                if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
                        *data = 1;
index 1aaf193..ed79a1c 100644 (file)
@@ -3354,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+                                 struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+       (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+        (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+
+       /* set build_skb flag */
+       if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+               set_ring_build_skb_enabled(rx_ring);
+       else
+               clear_ring_build_skb_enabled(rx_ring);
+}
+
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3373,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct igb_ring *rx_ring = adapter->rx_ring[i];
+               igb_set_rx_buffer_len(adapter, rx_ring);
+               igb_configure_rx_ring(adapter, rx_ring);
+       }
 }
 
 /**
@@ -4417,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
        tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
-/*
- * The largest size we can write to the descriptor is 65535.  In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR        15
-#define IGB_MAX_DATA_PER_TXD   (1<<IGB_MAX_TXD_PWR)
-
 static void igb_tx_map(struct igb_ring *tx_ring,
                       struct igb_tx_buffer *first,
                       const u8 hdr_len)
@@ -4592,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        struct igb_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
+       u16 count = TXD_USE_COUNT(skb_headlen(skb));
        __be16 protocol = vlan_get_protocol(skb);
        u8 hdr_len = 0;
 
-       /* need: 1 descriptor per page,
+       /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+        *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
         *       + 2 desc gap to keep tail from touching head,
-        *       + 1 desc for skb->data,
         *       + 1 desc for context descriptor,
-        * otherwise try next time */
-       if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+        * otherwise try next time
+        */
+       if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+               unsigned short f;
+               for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+                       count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+       } else {
+               count += skb_shinfo(skb)->nr_frags;
+       }
+
+       if (igb_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }
@@ -4642,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        igb_tx_map(tx_ring, first, hdr_len);
 
        /* Make sure there is space in the ring for the next send. */
-       igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+       igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        return NETDEV_TX_OK;
 
@@ -6046,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                }
        }
 
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets &&
                     netif_carrier_ok(tx_ring->netdev) &&
-                    igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+                    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
@@ -6097,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
                                         DMA_FROM_DEVICE);
 }
 
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+                                 struct page *page,
+                                 unsigned int truesize)
+{
+       /* avoid re-using remote pages */
+       if (unlikely(page_to_nid(page) != numa_node_id()))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+       /* since we are the only owner of the page and we need to
+        * increment it, just set the value to 2 in order to avoid
+        * an unnecessary locked operation
+        */
+       atomic_set(&page->_count, 2);
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+               return false;
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(page);
+#endif
+
+       return true;
+}
+
 /**
  * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -6119,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 {
        struct page *page = rx_buffer->page;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = IGB_RX_BUFSZ;
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
 
        if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
                unsigned char *va = page_address(page) + rx_buffer->page_offset;
@@ -6141,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
        }
 
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+                       rx_buffer->page_offset, size, truesize);
 
-       /* avoid re-using remote pages */
-       if (unlikely(page_to_nid(page) != numa_node_id()))
-               return false;
+       return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
 
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+                                          union e1000_adv_rx_desc *rx_desc)
+{
+       struct igb_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+       void *page_addr;
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
-       /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
-               return false;
+       unsigned int truesize = IGB_RX_BUFSZ;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(NET_SKB_PAD +
+                                              NET_IP_ALIGN +
+                                              size);
+#endif
 
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+       /* If we spanned a buffer we have a huge mess so test for it */
+       BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
 
-       /*
-        * since we are the only owner of the page and we need to
-        * increment it, just set the value to 2 in order to avoid
-        * an unnecessary locked operation
-        */
-       atomic_set(&page->_count, 2);
-#else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+       /* Guarantee this function can be used by verifying buffer sizes */
+       BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
+                                                       NET_IP_ALIGN +
+                                                       IGB_TS_HDR_LEN +
+                                                       ETH_FRAME_LEN +
+                                                       ETH_FCS_LEN));
 
-       if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
-               return false;
+       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
 
-       /* bump ref count on page before it is given to the stack */
-       get_page(page);
+       page_addr = page_address(page) + rx_buffer->page_offset;
+
+       /* prefetch first cache line of first page */
+       prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+       prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
 #endif
 
-       return true;
+       /* build an skb to around the page buffer */
+       skb = build_skb(page_addr, truesize);
+       if (unlikely(!skb)) {
+               rx_ring->rx_stats.alloc_failed++;
+               return NULL;
+       }
+
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     IGB_RX_BUFSZ,
+                                     DMA_FROM_DEVICE);
+
+       /* update pointers within the skb to store the data */
+       skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+       __skb_put(skb, size);
+
+       /* pull timestamp out of packet data */
+       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+               __skb_pull(skb, IGB_TS_HDR_LEN);
+       }
+
+       if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+               /* hand second half of page back to the ring */
+               igb_reuse_rx_page(rx_ring, rx_buffer);
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->dma = 0;
+       rx_buffer->page = NULL;
+
+       return skb;
 }
 
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
@@ -6184,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
 
-       /*
-        * This memory barrier is needed to keep us from reading
-        * any other fields out of the rx_desc until we know the
-        * RXD_STAT_DD bit is set
-        */
-       rmb();
-
        page = rx_buffer->page;
        prefetchw(page);
 
@@ -6590,8 +6694,17 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
                        break;
 
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * RXD_STAT_DD bit is set
+                */
+               rmb();
+
                /* retrieve a buffer from the ring */
-               skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+               if (ring_uses_build_skb(rx_ring))
+                       skb = igb_build_rx_buffer(rx_ring, rx_desc);
+               else
+                       skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb)
@@ -6678,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
        return true;
 }
 
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+       if (ring_uses_build_skb(rx_ring))
+               return NET_SKB_PAD + NET_IP_ALIGN;
+       else
+               return 0;
+}
+
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6704,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+                                                    bi->page_offset +
+                                                    igb_rx_offset(rx_ring));
 
                rx_desc++;
                bi++;
@@ -7608,7 +7731,7 @@ static DEFINE_SPINLOCK(i2c_clients_lock);
  *  @adapter: adapter struct
  *  @dev_addr: device address of i2c needed.
  */
-struct i2c_client *
+static struct i2c_client *
 igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
 {
        ulong flags;
@@ -7631,13 +7754,8 @@ igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
                }
        }
 
-       /* no client_list found, create a new one as long as
-        * irqs are not disabled
-        */
-       if (unlikely(irqs_disabled()))
-               goto exit;
-
-       client_list = kzalloc(sizeof(*client_list), GFP_KERNEL);
+       /* no client_list found, create a new one */
+       client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
        if (client_list == NULL)
                goto exit;
 
index fdca7b6..a1463e3 100644 (file)
@@ -127,8 +127,8 @@ struct igbvf_buffer {
                /* Tx */
                struct {
                        unsigned long time_stamp;
+                       union e1000_adv_tx_desc *next_to_watch;
                        u16 length;
-                       u16 next_to_watch;
                        u16 mapped_as_page;
                };
                /* Rx */
index f53f713..d60cd43 100644 (file)
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
        struct sk_buff *skb;
        union e1000_adv_tx_desc *tx_desc, *eop_desc;
        unsigned int total_bytes = 0, total_packets = 0;
-       unsigned int i, eop, count = 0;
+       unsigned int i, count = 0;
        bool cleaned = false;
 
        i = tx_ring->next_to_clean;
-       eop = tx_ring->buffer_info[i].next_to_watch;
-       eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+       buffer_info = &tx_ring->buffer_info[i];
+       eop_desc = buffer_info->next_to_watch;
+
+       do {
+               /* if next_to_watch is not set then there is no work pending */
+               if (!eop_desc)
+                       break;
+
+               /* prevent any other reads prior to eop_desc */
+               read_barrier_depends();
+
+               /* if DD is not set pending work has not been completed */
+               if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+                       break;
+
+               /* clear next_to_watch to prevent false hangs */
+               buffer_info->next_to_watch = NULL;
 
-       while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-              (count < tx_ring->count)) {
-               rmb();  /* read buffer_info after eop_desc status */
                for (cleaned = false; !cleaned; count++) {
                        tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-                       buffer_info = &tx_ring->buffer_info[i];
-                       cleaned = (i == eop);
+                       cleaned = (tx_desc == eop_desc);
                        skb = buffer_info->skb;
 
                        if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
+
+                       buffer_info = &tx_ring->buffer_info[i];
                }
-               eop = tx_ring->buffer_info[i].next_to_watch;
-               eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-       }
+
+               eop_desc = buffer_info->next_to_watch;
+       } while (count < tx_ring->count);
 
        tx_ring->next_to_clean = i;
 
@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        context_desc->seqnum_seed = 0;
 
        buffer_info->time_stamp = jiffies;
-       buffer_info->next_to_watch = i;
        buffer_info->dma = 0;
        i++;
        if (i == tx_ring->count)
@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                context_desc->mss_l4len_idx = 0;
 
                buffer_info->time_stamp = jiffies;
-               buffer_info->next_to_watch = i;
                buffer_info->dma = 0;
                i++;
                if (i == tx_ring->count)
@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
 
 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                    struct igbvf_ring *tx_ring,
-                                   struct sk_buff *skb,
-                                   unsigned int first)
+                                  struct sk_buff *skb)
 {
        struct igbvf_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        buffer_info->length = len;
        /* set time_stamp *before* dma to help avoid a possible race */
        buffer_info->time_stamp = jiffies;
-       buffer_info->next_to_watch = i;
        buffer_info->mapped_as_page = false;
        buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
                                          DMA_TO_DEVICE);
@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
                buffer_info->length = len;
                buffer_info->time_stamp = jiffies;
-               buffer_info->next_to_watch = i;
                buffer_info->mapped_as_page = true;
                buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                                DMA_TO_DEVICE);
@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        }
 
        tx_ring->buffer_info[i].skb = skb;
-       tx_ring->buffer_info[first].next_to_watch = i;
 
        return ++count;
 
@@ -2120,7 +2127,6 @@ dma_error:
        buffer_info->dma = 0;
        buffer_info->time_stamp = 0;
        buffer_info->length = 0;
-       buffer_info->next_to_watch = 0;
        buffer_info->mapped_as_page = false;
        if (count)
                count--;
@@ -2139,7 +2145,8 @@ dma_error:
 
 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                       struct igbvf_ring *tx_ring,
-                                      int tx_flags, int count, u32 paylen,
+                                     int tx_flags, int count,
+                                     unsigned int first, u32 paylen,
                                       u8 hdr_len)
 {
        union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
         * such as IA-64). */
        wmb();
 
+       tx_ring->buffer_info[first].next_to_watch = tx_desc;
        tx_ring->next_to_use = i;
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
        /* we need this if more than one processor can write to our tail
@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
         * count reflects descriptors mapped, if 0 then mapping error
         * has occurred and we need to rewind the descriptor queue
         */
-       count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+       count = igbvf_tx_map_adv(adapter, tx_ring, skb);
 
        if (count) {
                igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-                                  skb->len, hdr_len);
+                                  first, skb->len, hdr_len);
                /* Make sure there is space in the ring for the next send. */
                igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
        } else {
index c756412..ea48083 100644 (file)
@@ -708,11 +708,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 
        size = sizeof(struct ixgb_buffer) * txdr->count;
        txdr->buffer_info = vzalloc(size);
-       if (!txdr->buffer_info) {
-               netif_err(adapter, probe, adapter->netdev,
-                         "Unable to allocate transmit descriptor ring memory\n");
+       if (!txdr->buffer_info)
                return -ENOMEM;
-       }
 
        /* round up to nearest 4K */
 
@@ -797,11 +794,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 
        size = sizeof(struct ixgb_buffer) * rxdr->count;
        rxdr->buffer_info = vzalloc(size);
-       if (!rxdr->buffer_info) {
-               netif_err(adapter, probe, adapter->netdev,
-                         "Unable to allocate receive descriptor ring\n");
+       if (!rxdr->buffer_info)
                return -ENOMEM;
-       }
 
        /* Round up to nearest 4K */
 
index 687c83d..be2989e 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
index 8371ae4..a8e10cf 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -92,8 +92,6 @@
  */
 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
 
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE  16      /* Must be power of 2 */
 
@@ -158,7 +156,7 @@ struct vf_macvlans {
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
@@ -203,6 +201,7 @@ struct ixgbe_rx_queue_stats {
 
 enum ixgbe_ring_state_t {
        __IXGBE_TX_FDIR_INIT_DONE,
+       __IXGBE_TX_XPS_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_RX_RSC_ENABLED,
@@ -280,15 +279,10 @@ enum ixgbe_ring_f_enum {
 
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FDIR_INDICES 63      /* based on q_vector limit */
 #define IXGBE_MAX_FCOE_INDICES  8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 struct ixgbe_ring_feature {
        u16 limit;      /* upper limit on feature indices */
        u16 indices;    /* current value of indices */
@@ -626,6 +620,7 @@ enum ixgbe_state_t {
        __IXGBE_DOWN,
        __IXGBE_SERVICE_SCHED,
        __IXGBE_IN_SFP_INIT,
+       __IXGBE_READ_I2C,
 };
 
 struct ixgbe_cb {
@@ -706,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
 extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 #endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_HWMON
index 7fd3833..d0113fc 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -1003,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
 }
 
 /**
- *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
+ *  @dev_addr: address to read from
+ *  @byte_offset: byte offset to read from dev_addr
  *  @eeprom_data: value read
  *
- *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ *  Performs 8 byte read operation to SFP module's data over I2C interface.
  **/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                                      u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+                                   u8 byte_offset, u8 *eeprom_data)
 {
        s32 status = 0;
        u16 sfp_addr = 0;
@@ -1025,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                 * 0xC30D.  These registers are used to talk to the SFP+
                 * module's EEPROM through the SDA/SCL (I2C) interface.
                 */
-               sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+               sfp_addr = (dev_addr << 8) + byte_offset;
                sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
                hw->phy.ops.write_reg(hw,
                                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@@ -1057,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                *eeprom_data = (u8)(sfp_data >> 8);
        } else {
                status = IXGBE_ERR_PHY;
-               goto out;
        }
 
 out:
@@ -1065,6 +1065,36 @@ out:
 }
 
 /**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                                      u8 *eeprom_data)
+{
+       return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+                                       byte_offset, eeprom_data);
+}
+
+/**
+ *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                                      u8 *sff8472_data)
+{
+       return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                       byte_offset, sff8472_data);
+}
+
+/**
  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
  *  @hw: pointer to hardware structure
  *
@@ -1297,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
        .write_reg              = &ixgbe_write_phy_reg_generic,
        .setup_link             = &ixgbe_setup_phy_link_generic,
        .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
+       .read_i2c_sff8472       = &ixgbe_read_i2c_sff8472_82598,
        .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_82598,
        .check_overtemp   = &ixgbe_tn_check_overtemp,
 };
index 3350461..203a00c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -2241,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
        .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
        .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
        .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
+       .read_i2c_sff8472       = &ixgbe_read_i2c_sff8472_generic,
        .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
        .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
        .check_overtemp         = &ixgbe_tn_check_overtemp,
index 5e68afd..99e472e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index f7a0970..bc3948e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 9bc17c0..1f2c805 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 1f4108e..1634de8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 87592b4..ac78077 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index ba83570..3164f54 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 4eac80d..05e23b8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 4dec47f..a4ef076 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index c261333..f3d68f9 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -449,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
 static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u8 rval = 0;
 
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                switch (tcid) {
@@ -460,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
                        *num = adapter->dcb_cfg.num_tcs.pfc_tcs;
                        break;
                default:
-                       rval = -EINVAL;
+                       return -EINVAL;
                        break;
                }
        } else {
-               rval = -EINVAL;
+               return -EINVAL;
        }
 
-       return rval;
+       return 0;
 }
 
 static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
index 3504686..c5933f6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 216203e..f4d2e9e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
 #include <linux/uaccess.h>
 
 #include "ixgbe.h"
+#include "ixgbe_phy.h"
 
 
 #define IXGBE_ALL_RAR_ENTRIES 16
@@ -1040,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        p = (char *) adapter +
                                        ixgbe_gstrings_stats[i].stat_offset;
                        break;
+               default:
+                       data[i] = 0;
+                       continue;
                }
 
                data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
@@ -1096,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 
        switch (stringset) {
        case ETH_SS_TEST:
-               memcpy(data, *ixgbe_gstrings_test,
-                      IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+               for (i = 0; i < IXGBE_TEST_LEN; i++) {
+                       memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
                break;
        case ETH_SS_STATS:
                for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
@@ -2107,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_q_vector *q_vector;
        int i;
-       u16 tx_itr_param, rx_itr_param;
+       u16 tx_itr_param, rx_itr_param, tx_itr_prev;
        bool need_reset = false;
 
-       /* don't accept tx specific changes if we've got mixed RxTx vectors */
-       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
-           && ec->tx_coalesce_usecs)
-               return -EINVAL;
+       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+               /* reject Tx specific changes in case of mixed RxTx vectors */
+               if (ec->tx_coalesce_usecs)
+                       return -EINVAL;
+               tx_itr_prev = adapter->rx_itr_setting;
+       } else {
+               tx_itr_prev = adapter->tx_itr_setting;
+       }
 
        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2139,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        else
                tx_itr_param = adapter->tx_itr_setting;
 
+       /* mixed Rx/Tx */
+       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+               adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+       /* detect ITR changes that require update of TXDCTL.WTHRESH */
+       if ((adapter->tx_itr_setting > 1) &&
+           (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+               if ((tx_itr_prev == 1) ||
+                   (tx_itr_prev > IXGBE_100K_ITR))
+                       need_reset = true;
+       } else {
+               if ((tx_itr_prev > 1) &&
+                   (tx_itr_prev < IXGBE_100K_ITR))
+                       need_reset = true;
+       }
+#endif
        /* check the old value and enable RSC if necessary */
-       need_reset = ixgbe_update_rsc(adapter);
+       need_reset |= ixgbe_update_rsc(adapter);
 
        for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
@@ -2726,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
        return 0;
 }
 
+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+       unsigned int max_combined;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               /* We only support one q_vector without MSI-X */
+               max_combined = 1;
+       } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               /* SR-IOV currently only allows one queue on the PF */
+               max_combined = 1;
+       } else if (tcs > 1) {
+               /* For DCB report channels per traffic class */
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       /* 8 TC w/ 4 queues per TC */
+                       max_combined = 4;
+               } else if (tcs > 4) {
+                       /* 8 TC w/ 8 queues per TC */
+                       max_combined = 8;
+               } else {
+                       /* 4 TC w/ 16 queues per TC */
+                       max_combined = 16;
+               }
+       } else if (adapter->atr_sample_rate) {
+               /* support up to 64 queues with ATR */
+               max_combined = IXGBE_MAX_FDIR_INDICES;
+       } else {
+               /* support up to 16 queues with RSS */
+               max_combined = IXGBE_MAX_RSS_INDICES;
+       }
+
+       return max_combined;
+}
+
+static void ixgbe_get_channels(struct net_device *dev,
+                              struct ethtool_channels *ch)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       /* report maximum channels */
+       ch->max_combined = ixgbe_max_channels(adapter);
+
+       /* report info for other vector */
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               ch->max_other = NON_Q_VECTORS;
+               ch->other_count = NON_Q_VECTORS;
+       }
+
+       /* record RSS queues */
+       ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+       /* nothing else to report if RSS is disabled */
+       if (ch->combined_count == 1)
+               return;
+
+       /* we do not support ATR queueing if SR-IOV is enabled */
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               return;
+
+       /* same thing goes for being DCB enabled */
+       if (netdev_get_num_tc(dev) > 1)
+               return;
+
+       /* if ATR is disabled we can exit */
+       if (!adapter->atr_sample_rate)
+               return;
+
+       /* report flow director queues as maximum channels */
+       ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+                             struct ethtool_channels *ch)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       unsigned int count = ch->combined_count;
+
+       /* verify they are not requesting separate vectors */
+       if (!count || ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       /* verify other_count has not changed */
+       if (ch->other_count != NON_Q_VECTORS)
+               return -EINVAL;
+
+       /* verify the number of channels does not exceed hardware limits */
+       if (count > ixgbe_max_channels(adapter))
+               return -EINVAL;
+
+       /* update feature limits from largest to smallest supported values */
+       adapter->ring_feature[RING_F_FDIR].limit = count;
+
+       /* cap RSS limit at 16 */
+       if (count > IXGBE_MAX_RSS_INDICES)
+               count = IXGBE_MAX_RSS_INDICES;
+       adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+       /* cap FCoE limit at 8 */
+       if (count > IXGBE_FCRETA_SIZE)
+               count = IXGBE_FCRETA_SIZE;
+       adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+       /* use setup TC to update any traffic class queue mapping */
+       return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+static int ixgbe_get_module_info(struct net_device *dev,
+                                      struct ethtool_modinfo *modinfo)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 status;
+       u8 sff8472_rev, addr_mode;
+       int ret_val = 0;
+       bool page_swap = false;
+
+       /* avoid concurrent i2c reads */
+       while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+               msleep(100);
+
+       /* used by the service task */
+       set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+       /* Check whether we support SFF-8472 or not */
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_SFF_8472_COMP,
+                                            &sff8472_rev);
+       if (status != 0) {
+               ret_val = -EIO;
+               goto err_out;
+       }
+
+       /* addressing mode is not supported */
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_SFF_8472_SWAP,
+                                            &addr_mode);
+       if (status != 0) {
+               ret_val = -EIO;
+               goto err_out;
+       }
+
+       if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+               e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+               page_swap = true;
+       }
+
+       if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+               /* We have a SFP, but it does not support SFF-8472 */
+               modinfo->type = ETH_MODULE_SFF_8079;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+       } else {
+               /* We have a SFP which supports a revision of SFF-8472. */
+               modinfo->type = ETH_MODULE_SFF_8472;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+       }
+
+err_out:
+       clear_bit(__IXGBE_READ_I2C, &adapter->state);
+       return ret_val;
+}
+
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+                                        struct ethtool_eeprom *ee,
+                                        u8 *data)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       u8 databyte = 0xFF;
+       int i = 0;
+       int ret_val = 0;
+
+       /* ixgbe_get_module_info is called before this function in all
+        * cases, so we do not need any checks we already do above,
+        * and can trust ee->len to be a known value.
+        */
+
+       while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+               msleep(100);
+       set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+       /* Read the first block, SFF-8079 */
+       for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+               status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+               if (status != 0) {
+                       /* Error occurred while reading module */
+                       ret_val = -EIO;
+                       goto err_out;
+               }
+               data[i] = databyte;
+       }
+
+       /* If the second block is requested, check if SFF-8472 is supported. */
+       if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+               if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
+                       return -EOPNOTSUPP;
+
+               /* Read the second block, SFF-8472 */
+               for (i = ETH_MODULE_SFF_8079_LEN;
+                    i < ETH_MODULE_SFF_8472_LEN; i++) {
+                       status = hw->phy.ops.read_i2c_sff8472(hw,
+                               i - ETH_MODULE_SFF_8079_LEN, &databyte);
+                       if (status != 0) {
+                               /* Error occurred while reading module */
+                               ret_val = -EIO;
+                               goto err_out;
+                       }
+                       data[i] = databyte;
+               }
+       }
+
+err_out:
+       clear_bit(__IXGBE_READ_I2C, &adapter->state);
+
+       return ret_val;
+}
+
 static const struct ethtool_ops ixgbe_ethtool_ops = {
        .get_settings           = ixgbe_get_settings,
        .set_settings           = ixgbe_set_settings,
@@ -2754,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
        .set_coalesce           = ixgbe_set_coalesce,
        .get_rxnfc              = ixgbe_get_rxnfc,
        .set_rxnfc              = ixgbe_set_rxnfc,
+       .get_channels           = ixgbe_get_channels,
+       .set_channels           = ixgbe_set_channels,
        .get_ts_info            = ixgbe_get_ts_info,
+       .get_module_info        = ixgbe_get_module_info,
+       .get_module_eeprom      = ixgbe_get_module_eeprom,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
index 4968367..f58db45 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -716,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 
        /* Extra buffer to be shared by all DDPs for HW work around */
        buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
-       if (!buffer) {
-               e_err(drv, "failed to allocate extra DDP buffer\n");
+       if (!buffer)
                return -ENOMEM;
-       }
 
        dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma)) {
index bf724da..3a02759 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 8c74f73..ef5f7a6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
                fcoe = &adapter->ring_feature[RING_F_FCOE];
 
                /* limit ourselves based on feature limits */
-               fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
                fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
 
                if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
                fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
 
                if (vmdq_i > 1 && fcoe_i) {
-                       /* reserve no more than number of CPUs */
-                       fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
                        /* alloc queues for FCoE separately */
                        fcoe->indices = fcoe_i;
                        fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
        if (rss_i > 1 && adapter->atr_sample_rate) {
                f = &adapter->ring_feature[RING_F_FDIR];
 
-               f->indices = min_t(u16, num_online_cpus(), f->limit);
-               rss_i = max_t(u16, rss_i, f->indices);
+               rss_i = f->indices = f->limit;
 
                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 {
        struct ixgbe_q_vector *q_vector;
        struct ixgbe_ring *ring;
-       int node = -1;
+       int node = NUMA_NO_NODE;
        int cpu = -1;
        int ring_count, size;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
 
        ring_count = txr_count + rxr_count;
        size = sizeof(struct ixgbe_q_vector) +
               (sizeof(struct ixgbe_ring) * ring_count);
 
        /* customize cpu for Flow Director mapping */
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               if (cpu_online(v_idx)) {
-                       cpu = v_idx;
-                       node = cpu_to_node(cpu);
+       if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+               u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+               if (rss_i > 1 && adapter->atr_sample_rate) {
+                       if (cpu_online(v_idx)) {
+                               cpu = v_idx;
+                               node = cpu_to_node(cpu);
+                       }
                }
        }
 
index ac41361..68478d6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =
 #define DRV_VERSION "3.11.33-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-                               "Copyright (c) 1999-2012 Intel Corporation.";
+                               "Copyright (c) 1999-2013 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -838,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               rmb();
+               read_barrier_depends();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -1399,6 +1399,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
        /* set gso_size to avoid messing up TCP MSS */
        skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
                                                 IXGBE_CB(skb)->append_cnt);
+       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 }
 
 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
@@ -2785,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 
        /*
         * set WTHRESH to encourage burst writeback, it should not be set
-        * higher than 1 when ITR is 0 as it could cause false TX hangs
+        * higher than 1 when:
+        * - ITR is 0 as it could cause false TX hangs
+        * - ITR is set to > 100k int/sec and BQL is enabled
         *
         * In order to avoid issues WTHRESH + PTHRESH should always be equal
         * to or less than the number of on chip descriptors, which is
         * currently 40.
         */
+#if IS_ENABLED(CONFIG_BQL)
+       if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
        if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
                txdctl |= (1 << 16);    /* WTHRESH = 1 */
        else
                txdctl |= (8 << 16);    /* WTHRESH = 8 */
@@ -2812,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                ring->atr_sample_rate = 0;
        }
 
+       /* initialize XPS */
+       if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+               struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+               if (q_vector)
+                       netif_set_xps_queue(adapter->netdev,
+                                           &q_vector->affinity_mask,
+                                           ring->queue_index);
+       }
+
        clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
        /* enable queue */
@@ -4464,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       unsigned int rss;
+       unsigned int rss, fdir;
        u32 fwsm;
 #ifdef CONFIG_IXGBE_DCB
        int j;
@@ -4479,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
 
-       /* Set capability flags */
+       /* Set common capability flags and settings */
        rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
        adapter->ring_feature[RING_F_RSS].limit = rss;
+       adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+       adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+       adapter->max_q_vectors = MAX_Q_VECTORS_82599;
+       adapter->atr_sample_rate = 20;
+       fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+       adapter->ring_feature[RING_F_FDIR].limit = fdir;
+       adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef CONFIG_IXGBE_DCA
+       adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+       adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+       adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+       /* Default traffic class to use for FCoE */
+       adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+
+       /* Set MAC specific capability flags and exceptions */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
+               adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+               adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+
                adapter->max_q_vectors = MAX_Q_VECTORS_82598;
+               adapter->ring_feature[RING_F_FDIR].limit = 0;
+               adapter->atr_sample_rate = 0;
+               adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+               adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+               adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+               adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+               break;
+       case ixgbe_mac_82599EB:
+               if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
                break;
        case ixgbe_mac_X540:
                fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
                if (fwsm & IXGBE_FWSM_TS_ENABLED)
                        adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-       case ixgbe_mac_82599EB:
-               adapter->max_q_vectors = MAX_Q_VECTORS_82599;
-               adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
-               adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-               if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
-                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-               /* Flow Director hash filters enabled */
-               adapter->atr_sample_rate = 20;
-               adapter->ring_feature[RING_F_FDIR].limit =
-                                                        IXGBE_MAX_FDIR_INDICES;
-               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
-#ifdef IXGBE_FCOE
-               adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
-               adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-#ifdef CONFIG_IXGBE_DCB
-               /* Default traffic class to use for FCoE */
-               adapter->fcoe.up = IXGBE_FCOE_DEFTC;
-#endif
-#endif /* IXGBE_FCOE */
                break;
        default:
                break;
@@ -4869,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
         */
        if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
            (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
-           (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+           (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
                e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
 
        e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5679,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
                return;
 
+       /* concurrent i2c reads are not supported */
+       if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+               return;
+
        /* someone else is in init, wait until next service event */
        if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
                return;
@@ -6344,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
        return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
+#ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-       int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
-                                              smp_processor_id();
-#ifdef IXGBE_FCOE
-       __be16 protocol = vlan_get_protocol(skb);
+       struct ixgbe_adapter *adapter;
+       struct ixgbe_ring_feature *f;
+       int txq;
 
-       if (((protocol == htons(ETH_P_FCOE)) ||
-           (protocol == htons(ETH_P_FIP))) &&
-           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-               struct ixgbe_ring_feature *f;
+       /*
+        * only execute the code below if protocol is FCoE
+        * or FIP and we have FCoE enabled on the adapter
+        */
+       switch (vlan_get_protocol(skb)) {
+       case __constant_htons(ETH_P_FCOE):
+       case __constant_htons(ETH_P_FIP):
+               adapter = netdev_priv(dev);
 
-               f = &adapter->ring_feature[RING_F_FCOE];
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+                       break;
+       default:
+               return __netdev_pick_tx(dev, skb);
+       }
 
-               while (txq >= f->indices)
-                       txq -= f->indices;
-               txq += adapter->ring_feature[RING_F_FCOE].offset;
+       f = &adapter->ring_feature[RING_F_FCOE];
 
-               return txq;
-       }
-#endif
+       txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+                                          smp_processor_id();
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               while (unlikely(txq >= dev->real_num_tx_queues))
-                       txq -= dev->real_num_tx_queues;
-               return txq;
-       }
+       while (txq >= f->indices)
+               txq -= f->indices;
 
-       return skb_tx_hash(dev, skb);
+       return txq + f->offset;
 }
 
+#endif
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
@@ -6780,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
        }
 }
 
+#endif /* CONFIG_IXGBE_DCB */
 /**
  * ixgbe_setup_tc - configure net_device for multiple traffic classes
  *
@@ -6805,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                ixgbe_close(dev);
        ixgbe_clear_interrupt_scheme(adapter);
 
+#ifdef CONFIG_IXGBE_DCB
        if (tc) {
                netdev_set_num_tc(dev, tc);
                ixgbe_set_prio_tc_map(adapter);
@@ -6827,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                adapter->dcb_cfg.pfc_mode_enable = false;
        }
 
-       ixgbe_init_interrupt_scheme(adapter);
        ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+       ixgbe_init_interrupt_scheme(adapter);
+
        if (netif_running(dev))
-               ixgbe_open(dev);
+               return ixgbe_open(dev);
 
        return 0;
 }
 
-#endif /* CONFIG_IXGBE_DCB */
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
        rtnl_lock();
-#ifdef CONFIG_IXGBE_DCB
        ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
-#else
-       if (netif_running(netdev))
-               ixgbe_close(netdev);
-       ixgbe_clear_interrupt_scheme(adapter);
-       ixgbe_init_interrupt_scheme(adapter);
-       if (netif_running(netdev))
-               ixgbe_open(netdev);
-#endif
        rtnl_unlock();
 }
 
@@ -7001,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return err;
 }
 
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
                             const unsigned char *addr)
 {
@@ -7078,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
 }
 
 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                   struct net_device *dev)
+                                   struct net_device *dev,
+                                   u32 filter_mask)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        u16 mode;
@@ -7098,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
        .ndo_select_queue       = ixgbe_select_queue,
+#endif
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbe_set_mac,
@@ -7210,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
        int i, err, pci_using_dac;
+       unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
-       unsigned int indices = num_possible_cpus();
-       unsigned int dcb_max = 0;
 #ifdef IXGBE_FCOE
        u16 device_caps;
 #endif
@@ -7261,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_master(pdev);
        pci_save_state(pdev);
 
+       if (ii->mac == ixgbe_mac_82598EB) {
 #ifdef CONFIG_IXGBE_DCB
-       if (ii->mac == ixgbe_mac_82598EB)
-               dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
-                               IXGBE_MAX_RSS_INDICES);
-       else
-               dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
-                               IXGBE_MAX_FDIR_INDICES);
+               /* 8 TC w/ 4 queues per TC */
+               indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+               indices = IXGBE_MAX_RSS_INDICES;
 #endif
+       }
 
-       if (ii->mac == ixgbe_mac_82598EB)
-               indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
-       else
-               indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
-       indices += min_t(unsigned int, num_possible_cpus(),
-                        IXGBE_MAX_FCOE_INDICES);
-#endif
-       indices = max_t(unsigned int, dcb_max, indices);
        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
        if (!netdev) {
                err = -ENOMEM;
@@ -7434,13 +7463,17 @@ skip_sriov:
 
 #ifdef IXGBE_FCOE
        if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+               unsigned int fcoe_l;
+
                if (hw->mac.ops.get_device_caps) {
                        hw->mac.ops.get_device_caps(hw, &device_caps);
                        if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
                                adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
                }
 
-               adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+               fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+               adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
 
                netdev->features |= NETIF_F_FSO |
                                    NETIF_F_FCOE_CRC;
index 1f3e32b..d4a64e6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 42dd65e..e44ff47 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index f4b2c0d..060d2ad 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_IDENTIFIER,
-                                            &identifier);
+                                            &identifier);
 
-       if (status == IXGBE_ERR_SWFW_SYNC ||
-           status == IXGBE_ERR_I2C ||
-           status == IXGBE_ERR_SFP_NOT_PRESENT)
+       if (status != 0)
                goto err_read_i2c_eeprom;
 
        /* LAN ID is needed for sfp_type determination */
@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                                     IXGBE_SFF_1GBE_COMP_CODES,
                                                     &comp_codes_1g);
 
-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;
 
                status = hw->phy.ops.read_i2c_eeprom(hw,
                                                     IXGBE_SFF_10GBE_COMP_CODES,
                                                     &comp_codes_10g);
 
-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;
                status = hw->phy.ops.read_i2c_eeprom(hw,
                                                     IXGBE_SFF_CABLE_TECHNOLOGY,
                                                     &cable_tech);
 
-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;
 
                 /* ID Module
@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                if (hw->phy.type != ixgbe_phy_nl) {
                        hw->phy.id = identifier;
                        status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE0,
-                                                   &oui_bytes[0]);
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE0,
+                                                   &oui_bytes[0]);
 
-                       if (status == IXGBE_ERR_SWFW_SYNC ||
-                           status == IXGBE_ERR_I2C ||
-                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       if (status != 0)
                                goto err_read_i2c_eeprom;
 
                        status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE1,
                                                    &oui_bytes[1]);
 
-                       if (status == IXGBE_ERR_SWFW_SYNC ||
-                           status == IXGBE_ERR_I2C ||
-                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       if (status != 0)
                                goto err_read_i2c_eeprom;
 
                        status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE2,
                                                    &oui_bytes[2]);
 
-                       if (status == IXGBE_ERR_SWFW_SYNC ||
-                           status == IXGBE_ERR_I2C ||
-                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       if (status != 0)
                                goto err_read_i2c_eeprom;
 
                        vendor_oui =
@@ -1204,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
 }
 
 /**
+ *  ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @sff8472_data: value read
+ *
+ *  Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                  u8 *sff8472_data)
+{
+       return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+                                        IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                        sff8472_data);
+}
+
+/**
  *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
  *  @hw: pointer to hardware structure
  *  @byte_offset: EEPROM byte offset to write
@@ -1291,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                break;
 
 fail:
+               ixgbe_i2c_bus_clear(hw);
                hw->mac.ops.release_swfw_sync(hw, swfw_mask);
                msleep(100);
-               ixgbe_i2c_bus_clear(hw);
                retry++;
                if (retry < max_retry)
                        hw_dbg(hw, "I2C byte read error - Retrying.\n");
index 51b0a91..886a343 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
 
 #include "ixgbe_type.h"
 #define IXGBE_I2C_EEPROM_DEV_ADDR    0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2   0xA2
 
 /* EEPROM byte offsets */
 #define IXGBE_SFF_IDENTIFIER         0x0
@@ -41,6 +42,8 @@
 #define IXGBE_SFF_10GBE_COMP_CODES   0x3
 #define IXGBE_SFF_CABLE_TECHNOLOGY   0x8
 #define IXGBE_SFF_CABLE_SPEC_COMP    0x3C
+#define IXGBE_SFF_SFF_8472_SWAP      0x5C
+#define IXGBE_SFF_SFF_8472_COMP      0x5E
 
 /* Bitmasks */
 #define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
@@ -51,6 +54,7 @@
 #define IXGBE_SFF_1GBASET_CAPABLE            0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE          0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE          0x20
+#define IXGBE_SFF_ADDRESSING_MODE           0x4
 #define IXGBE_I2C_EEPROM_READ_MASK           0x100
 #define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -88,6 +92,9 @@
 #define IXGBE_TN_LASI_STATUS_REG        0x9005
 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
 
+/* SFP+ SFF-8472 Compliance code */
+#define IXGBE_SFF_SFF_8472_UNSUP      0x00
+
 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
@@ -125,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                  u8 dev_addr, u8 data);
 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                   u8 *eeprom_data);
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                  u8 *sff8472_data);
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                    u8 eeprom_data);
 #endif /* _IXGBE_PHY_H_ */
index 53d2047..331987d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index ee3507f..d44b4d2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 008f9ce..4713f9f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 16ddf14..d118def 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 0bdcc88..6652e96 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -2874,6 +2874,7 @@ struct ixgbe_phy_operations {
        s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
        s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
        s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+       s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
        s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
        s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
        s32 (*check_overtemp)(struct ixgbe_hw *);
index 2fa5843..66c5e94 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -878,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
        .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
        .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
        .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
+       .read_i2c_sff8472       = &ixgbe_read_i2c_sff8472_generic,
        .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
        .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
        .check_overtemp         = &ixgbe_tn_check_overtemp,
index f523f02..00f25b5 100644 (file)
@@ -712,16 +712,13 @@ static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
                                                __be32 ipv4_dst)
 {
 #ifdef CONFIG_INET
-       __be64 be_mac = 0;
        unsigned char mac[ETH_ALEN];
 
        if (!ipv4_is_multicast(ipv4_dst)) {
-               if (cmd->fs.flow_type & FLOW_MAC_EXT) {
+               if (cmd->fs.flow_type & FLOW_MAC_EXT)
                        memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
-               } else {
-                       be_mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
-                       memcpy(&mac, &be_mac, ETH_ALEN);
-               }
+               else
+                       memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
        } else {
                ip_eth_mc_map(ipv4_dst, mac);
        }
@@ -744,7 +741,6 @@ static int add_ip_rule(struct mlx4_en_priv *priv,
        spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
        spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
        if (!spec_l2 || !spec_l3) {
-               en_err(priv, "Fail to alloc ethtool rule.\n");
                err = -ENOMEM;
                goto free_spec;
        }
@@ -785,7 +781,6 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
        spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
        spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
        if (!spec_l2 || !spec_l3 || !spec_l4) {
-               en_err(priv, "Fail to alloc ethtool rule.\n");
                err = -ENOMEM;
                goto free_spec;
        }
index f3c7961..e3c3d12 100644 (file)
@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
        return i;
 }
 
+void mlx4_en_update_loopback_state(struct net_device *dev,
+                                  netdev_features_t features)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
+                       MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
+
+       /* Drop the packet if SRIOV is not enabled
+        * and not performing the selftest or flb disabled
+        */
+       if (mlx4_is_mfunc(priv->mdev->dev) &&
+           !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
+               priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
+
+       /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
+        * is requested
+        */
+       if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
+               priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
+}
+
 static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 {
        struct mlx4_en_profile *params = &mdev->profile;
index ac1c14f..5088dc5 100644 (file)
@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
                .priority = MLX4_DOMAIN_RFS,
        };
        int rc;
-       __be64 mac;
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
        list_add_tail(&spec_eth.list, &rule.list);
        list_add_tail(&spec_ip.list, &rule.list);
        list_add_tail(&spec_tcp.list, &rule.list);
 
-       mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
-
        rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
-       memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+       memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
        memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
 
        filter->activated = 0;
@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        return 0;
 }
 
+static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+{
+       int i;
+       for (i = ETH_ALEN - 1; i >= 0; --i) {
+               dst_mac[i] = src_mac & 0xff;
+               src_mac >>= 8;
+       }
+       memset(&dst_mac[ETH_ALEN], 0, 2);
+}
+
+static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
+                               unsigned char *mac, int *qpn, u64 *reg_id)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_dev *dev = mdev->dev;
+       int err;
+
+       switch (dev->caps.steering_mode) {
+       case MLX4_STEERING_MODE_B0: {
+               struct mlx4_qp qp;
+               u8 gid[16] = {0};
+
+               qp.qpn = *qpn;
+               memcpy(&gid[10], mac, ETH_ALEN);
+               gid[5] = priv->port;
+
+               err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+               break;
+       }
+       case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+               struct mlx4_spec_list spec_eth = { {NULL} };
+               __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+               struct mlx4_net_trans_rule rule = {
+                       .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+                       .exclusive = 0,
+                       .allow_loopback = 1,
+                       .promisc_mode = MLX4_FS_PROMISC_NONE,
+                       .priority = MLX4_DOMAIN_NIC,
+               };
+
+               rule.port = priv->port;
+               rule.qpn = *qpn;
+               INIT_LIST_HEAD(&rule.list);
+
+               spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+               memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
+               memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+               list_add_tail(&spec_eth.list, &rule.list);
+
+               err = mlx4_flow_attach(dev, &rule, reg_id);
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       if (err)
+               en_warn(priv, "Failed Attaching Unicast\n");
+
+       return err;
+}
+
+static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
+                                    unsigned char *mac, int qpn, u64 reg_id)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_dev *dev = mdev->dev;
+
+       switch (dev->caps.steering_mode) {
+       case MLX4_STEERING_MODE_B0: {
+               struct mlx4_qp qp;
+               u8 gid[16] = {0};
+
+               qp.qpn = qpn;
+               memcpy(&gid[10], mac, ETH_ALEN);
+               gid[5] = priv->port;
+
+               mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+               break;
+       }
+       case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+               mlx4_flow_detach(dev, reg_id);
+               break;
+       }
+       default:
+               en_err(priv, "Invalid steering mode.\n");
+       }
+}
+
+static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_dev *dev = mdev->dev;
+       struct mlx4_mac_entry *entry;
+       int index = 0;
+       int err = 0;
+       u64 reg_id;
+       int *qpn = &priv->base_qpn;
+       u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+       en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
+              priv->dev->dev_addr);
+       index = mlx4_register_mac(dev, priv->port, mac);
+       if (index < 0) {
+               err = index;
+               en_err(priv, "Failed adding MAC: %pM\n",
+                      priv->dev->dev_addr);
+               return err;
+       }
+
+       if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+               int base_qpn = mlx4_get_base_qpn(dev, priv->port);
+               *qpn = base_qpn + index;
+               return 0;
+       }
+
+       err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+       en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
+       if (err) {
+               en_err(priv, "Failed to reserve qp for mac registration\n");
+               goto qp_err;
+       }
+
+       err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+       if (err)
+               goto steer_err;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               err = -ENOMEM;
+               goto alloc_err;
+       }
+       memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+       entry->reg_id = reg_id;
+
+       hlist_add_head_rcu(&entry->hlist,
+                          &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+       return 0;
+
+alloc_err:
+       mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+
+steer_err:
+       mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+       mlx4_unregister_mac(dev, priv->port, mac);
+       return err;
+}
+
+static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_dev *dev = mdev->dev;
+       int qpn = priv->base_qpn;
+       u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+       en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+              priv->dev->dev_addr);
+       mlx4_unregister_mac(dev, priv->port, mac);
+
+       if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+               struct mlx4_mac_entry *entry;
+               struct hlist_node *n, *tmp;
+               struct hlist_head *bucket;
+               unsigned int mac_hash;
+
+               mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
+               bucket = &priv->mac_hash[mac_hash];
+               hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+                       if (ether_addr_equal_64bits(entry->mac,
+                                                   priv->dev->dev_addr)) {
+                               en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
+                                      priv->port, priv->dev->dev_addr, qpn);
+                               mlx4_en_uc_steer_release(priv, entry->mac,
+                                                        qpn, entry->reg_id);
+                               mlx4_qp_release_range(dev, qpn, 1);
+
+                               hlist_del_rcu(&entry->hlist);
+                               kfree_rcu(entry, rcu);
+                               break;
+                       }
+               }
+       }
+}
+
+static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
+                              unsigned char *new_mac, unsigned char *prev_mac)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_dev *dev = mdev->dev;
+       int err = 0;
+       u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+
+       if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+               struct hlist_head *bucket;
+               unsigned int mac_hash;
+               struct mlx4_mac_entry *entry;
+               struct hlist_node *n, *tmp;
+               u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+
+               bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
+               hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+                       if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
+                               mlx4_en_uc_steer_release(priv, entry->mac,
+                                                        qpn, entry->reg_id);
+                               mlx4_unregister_mac(dev, priv->port,
+                                                   prev_mac_u64);
+                               hlist_del_rcu(&entry->hlist);
+                               synchronize_rcu();
+                               memcpy(entry->mac, new_mac, ETH_ALEN);
+                               entry->reg_id = 0;
+                               mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
+                               hlist_add_head_rcu(&entry->hlist,
+                                                  &priv->mac_hash[mac_hash]);
+                               mlx4_register_mac(dev, priv->port, new_mac_u64);
+                               err = mlx4_en_uc_steer_add(priv, new_mac,
+                                                          &qpn,
+                                                          &entry->reg_id);
+                               return err;
+                       }
+               }
+               return -EINVAL;
+       }
+
+       return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
+}
+
 u64 mlx4_en_mac_to_u64(u8 *addr)
 {
        u64 mac = 0;
@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
                return -EADDRNOTAVAIL;
 
        memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-       priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
        queue_work(mdev->workqueue, &priv->mac_task);
        return 0;
 }
@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
-               err = mlx4_replace_mac(mdev->dev, priv->port,
-                                      priv->base_qpn, priv->mac);
+               err = mlx4_en_replace_mac(priv, priv->base_qpn,
+                                         priv->dev->dev_addr, priv->prev_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
+               memcpy(priv->prev_mac, priv->dev->dev_addr,
+                      sizeof(priv->prev_mac));
        } else
-               en_dbg(HW, priv, "Port is down while "
-                                "registering mac, exiting...\n");
+               en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
        mutex_unlock(&mdev->state_lock);
 }
@@ -482,7 +708,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
        netdev_for_each_mc_addr(ha, dev) {
                tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
                if (!tmp) {
-                       en_err(priv, "failed to allocate multicast list\n");
                        mlx4_en_clear_list(dev);
                        return;
                }
@@ -526,181 +751,153 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
                        }
                }
                if (!found) {
-                       new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+                       new_mc = kmemdup(src_tmp,
+                                        sizeof(struct mlx4_en_mc_list),
                                         GFP_KERNEL);
-                       if (!new_mc) {
-                               en_err(priv, "Failed to allocate current multicast list\n");
+                       if (!new_mc)
                                return;
-                       }
-                       memcpy(new_mc, src_tmp,
-                              sizeof(struct mlx4_en_mc_list));
+
                        new_mc->action = MCLIST_ADD;
                        list_add_tail(&new_mc->list, dst);
                }
        }
 }
 
-static void mlx4_en_set_multicast(struct net_device *dev)
+static void mlx4_en_set_rx_mode(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
        if (!priv->port_up)
                return;
 
-       queue_work(priv->mdev->workqueue, &priv->mcast_task);
+       queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
 }
 
-static void mlx4_en_do_set_multicast(struct work_struct *work)
+static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
+                                    struct mlx4_en_dev *mdev)
 {
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                mcast_task);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct net_device *dev = priv->dev;
-       struct mlx4_en_mc_list *mclist, *tmp;
-       u64 mcast_addr = 0;
-       u8 mc_list[16] = {0};
        int err = 0;
 
-       mutex_lock(&mdev->state_lock);
-       if (!mdev->device_up) {
-               en_dbg(HW, priv, "Card is not up, "
-                                "ignoring multicast change.\n");
-               goto out;
-       }
-       if (!priv->port_up) {
-               en_dbg(HW, priv, "Port is down, "
-                                "ignoring  multicast change.\n");
-               goto out;
-       }
-
-       if (!netif_carrier_ok(dev)) {
-               if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
-                       if (priv->port_state.link_state) {
-                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
-                               netif_carrier_on(dev);
-                               en_dbg(LINK, priv, "Link Up\n");
-                       }
-               }
-       }
-
-       /*
-        * Promsicuous mode: disable all filters
-        */
-
-       if (dev->flags & IFF_PROMISC) {
-               if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
-                       if (netif_msg_rx_status(priv))
-                               en_warn(priv, "Entering promiscuous mode\n");
-                       priv->flags |= MLX4_EN_FLAG_PROMISC;
-
-                       /* Enable promiscouos mode */
-                       switch (mdev->dev->caps.steering_mode) {
-                       case MLX4_STEERING_MODE_DEVICE_MANAGED:
-                               err = mlx4_flow_steer_promisc_add(mdev->dev,
-                                                                 priv->port,
-                                                                 priv->base_qpn,
-                                                                 MLX4_FS_PROMISC_UPLINK);
-                               if (err)
-                                       en_err(priv, "Failed enabling promiscuous mode\n");
-                               priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
-                               break;
-
-                       case MLX4_STEERING_MODE_B0:
-                               err = mlx4_unicast_promisc_add(mdev->dev,
-                                                              priv->base_qpn,
-                                                              priv->port);
-                               if (err)
-                                       en_err(priv, "Failed enabling unicast promiscuous mode\n");
-
-                               /* Add the default qp number as multicast
-                                * promisc
-                                */
-                               if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
-                                       err = mlx4_multicast_promisc_add(mdev->dev,
-                                                                        priv->base_qpn,
-                                                                        priv->port);
-                                       if (err)
-                                               en_err(priv, "Failed enabling multicast promiscuous mode\n");
-                                       priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
-                               }
-                               break;
-
-                       case MLX4_STEERING_MODE_A0:
-                               err = mlx4_SET_PORT_qpn_calc(mdev->dev,
-                                                            priv->port,
-                                                            priv->base_qpn,
-                                                            1);
-                               if (err)
-                                       en_err(priv, "Failed enabling promiscuous mode\n");
-                               break;
-                       }
-
-                       /* Disable port multicast filter (unconditionally) */
-                       err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
-                                                 0, MLX4_MCAST_DISABLE);
-                       if (err)
-                               en_err(priv, "Failed disabling "
-                                            "multicast filter\n");
-
-                       /* Disable port VLAN filter */
-                       err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-                       if (err)
-                               en_err(priv, "Failed disabling VLAN filter\n");
-               }
-               goto out;
-       }
-
-       /*
-        * Not in promiscuous mode
-        */
-
-       if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+       if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
                if (netif_msg_rx_status(priv))
-                       en_warn(priv, "Leaving promiscuous mode\n");
-               priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+                       en_warn(priv, "Entering promiscuous mode\n");
+               priv->flags |= MLX4_EN_FLAG_PROMISC;
 
-               /* Disable promiscouos mode */
+               /* Enable promiscuous mode */
                switch (mdev->dev->caps.steering_mode) {
                case MLX4_STEERING_MODE_DEVICE_MANAGED:
-                       err = mlx4_flow_steer_promisc_remove(mdev->dev,
-                                                            priv->port,
-                                                            MLX4_FS_PROMISC_UPLINK);
+                       err = mlx4_flow_steer_promisc_add(mdev->dev,
+                                                         priv->port,
+                                                         priv->base_qpn,
+                                                         MLX4_FS_PROMISC_UPLINK);
                        if (err)
-                               en_err(priv, "Failed disabling promiscuous mode\n");
-                       priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+                               en_err(priv, "Failed enabling promiscuous mode\n");
+                       priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        break;
 
                case MLX4_STEERING_MODE_B0:
-                       err = mlx4_unicast_promisc_remove(mdev->dev,
-                                                         priv->base_qpn,
-                                                         priv->port);
+                       err = mlx4_unicast_promisc_add(mdev->dev,
+                                                      priv->base_qpn,
+                                                      priv->port);
                        if (err)
-                               en_err(priv, "Failed disabling unicast promiscuous mode\n");
-                       /* Disable Multicast promisc */
-                       if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
-                               err = mlx4_multicast_promisc_remove(mdev->dev,
-                                                                   priv->base_qpn,
-                                                                   priv->port);
+                               en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+                       /* Add the default qp number as multicast
+                        * promisc
+                        */
+                       if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+                               err = mlx4_multicast_promisc_add(mdev->dev,
+                                                                priv->base_qpn,
+                                                                priv->port);
                                if (err)
-                                       en_err(priv, "Failed disabling multicast promiscuous mode\n");
-                               priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+                                       en_err(priv, "Failed enabling multicast promiscuous mode\n");
+                               priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        }
                        break;
 
                case MLX4_STEERING_MODE_A0:
                        err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                                     priv->port,
-                                                    priv->base_qpn, 0);
+                                                    priv->base_qpn,
+                                                    1);
                        if (err)
-                               en_err(priv, "Failed disabling promiscuous mode\n");
+                               en_err(priv, "Failed enabling promiscuous mode\n");
                        break;
                }
 
-               /* Enable port VLAN filter */
+               /* Disable port multicast filter (unconditionally) */
+               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+                                         0, MLX4_MCAST_DISABLE);
+               if (err)
+                       en_err(priv, "Failed disabling multicast filter\n");
+
+               /* Disable port VLAN filter */
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
-                       en_err(priv, "Failed enabling VLAN filter\n");
+                       en_err(priv, "Failed disabling VLAN filter\n");
        }
+}
+
+static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
+                                      struct mlx4_en_dev *mdev)
+{
+       int err = 0;
+
+       if (netif_msg_rx_status(priv))
+               en_warn(priv, "Leaving promiscuous mode\n");
+       priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+       /* Disable promiscouos mode */
+       switch (mdev->dev->caps.steering_mode) {
+       case MLX4_STEERING_MODE_DEVICE_MANAGED:
+               err = mlx4_flow_steer_promisc_remove(mdev->dev,
+                                                    priv->port,
+                                                    MLX4_FS_PROMISC_UPLINK);
+               if (err)
+                       en_err(priv, "Failed disabling promiscuous mode\n");
+               priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+               break;
+
+       case MLX4_STEERING_MODE_B0:
+               err = mlx4_unicast_promisc_remove(mdev->dev,
+                                                 priv->base_qpn,
+                                                 priv->port);
+               if (err)
+                       en_err(priv, "Failed disabling unicast promiscuous mode\n");
+               /* Disable Multicast promisc */
+               if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+                       err = mlx4_multicast_promisc_remove(mdev->dev,
+                                                           priv->base_qpn,
+                                                           priv->port);
+                       if (err)
+                               en_err(priv, "Failed disabling multicast promiscuous mode\n");
+                       priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+               }
+               break;
+
+       case MLX4_STEERING_MODE_A0:
+               err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+                                            priv->port,
+                                            priv->base_qpn, 0);
+               if (err)
+                       en_err(priv, "Failed disabling promiscuous mode\n");
+               break;
+       }
+
+       /* Enable port VLAN filter */
+       err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+       if (err)
+               en_err(priv, "Failed enabling VLAN filter\n");
+}
+
+static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
+                                struct net_device *dev,
+                                struct mlx4_en_dev *mdev)
+{
+       struct mlx4_en_mc_list *mclist, *tmp;
+       u64 mcast_addr = 0;
+       u8 mc_list[16] = {0};
+       int err = 0;
 
        /* Enable/disable the multicast filter according to IFF_ALLMULTI */
        if (dev->flags & IFF_ALLMULTI) {
@@ -814,6 +1011,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
                        }
                }
        }
+}
+
+static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+                                struct net_device *dev,
+                                struct mlx4_en_dev *mdev)
+{
+       struct netdev_hw_addr *ha;
+       struct mlx4_mac_entry *entry;
+       struct hlist_node *n, *tmp;
+       bool found;
+       u64 mac;
+       int err = 0;
+       struct hlist_head *bucket;
+       unsigned int i;
+       int removed = 0;
+       u32 prev_flags;
+
+       /* Note that we do not need to protect our mac_hash traversal with rcu,
+        * since all modification code is protected by mdev->state_lock
+        */
+
+       /* find what to remove */
+       for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+               bucket = &priv->mac_hash[i];
+               hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+                       found = false;
+                       netdev_for_each_uc_addr(ha, dev) {
+                               if (ether_addr_equal_64bits(entry->mac,
+                                                           ha->addr)) {
+                                       found = true;
+                                       break;
+                               }
+                       }
+
+                       /* MAC address of the port is not in uc list */
+                       if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+                               found = true;
+
+                       if (!found) {
+                               mac = mlx4_en_mac_to_u64(entry->mac);
+                               mlx4_en_uc_steer_release(priv, entry->mac,
+                                                        priv->base_qpn,
+                                                        entry->reg_id);
+                               mlx4_unregister_mac(mdev->dev, priv->port, mac);
+
+                               hlist_del_rcu(&entry->hlist);
+                               kfree_rcu(entry, rcu);
+                               en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+                                      entry->mac, priv->port);
+                               ++removed;
+                       }
+               }
+       }
+
+       /* if we didn't remove anything, there is no use in trying to add
+        * again once we are in a forced promisc mode state
+        */
+       if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
+               return;
+
+       prev_flags = priv->flags;
+       priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
+
+       /* find what to add */
+       netdev_for_each_uc_addr(ha, dev) {
+               found = false;
+               bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
+               hlist_for_each_entry(entry, n, bucket, hlist) {
+                       if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (!found) {
+                       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+                       if (!entry) {
+                               en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
+                                      ha->addr, priv->port);
+                               priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+                               break;
+                       }
+                       mac = mlx4_en_mac_to_u64(ha->addr);
+                       memcpy(entry->mac, ha->addr, ETH_ALEN);
+                       err = mlx4_register_mac(mdev->dev, priv->port, mac);
+                       if (err < 0) {
+                               en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
+                                      ha->addr, priv->port, err);
+                               kfree(entry);
+                               priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+                               break;
+                       }
+                       err = mlx4_en_uc_steer_add(priv, ha->addr,
+                                                  &priv->base_qpn,
+                                                  &entry->reg_id);
+                       if (err) {
+                               en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
+                                      ha->addr, priv->port, err);
+                               mlx4_unregister_mac(mdev->dev, priv->port, mac);
+                               kfree(entry);
+                               priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+                               break;
+                       } else {
+                               unsigned int mac_hash;
+                               en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
+                                      ha->addr, priv->port);
+                               mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
+                               bucket = &priv->mac_hash[mac_hash];
+                               hlist_add_head_rcu(&entry->hlist, bucket);
+                       }
+               }
+       }
+
+       if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+               en_warn(priv, "Forcing promiscuous mode on port:%d\n",
+                       priv->port);
+       } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+               en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
+                       priv->port);
+       }
+}
+
+static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+{
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                rx_mode_task);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct net_device *dev = priv->dev;
+
+       mutex_lock(&mdev->state_lock);
+       if (!mdev->device_up) {
+               en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
+               goto out;
+       }
+       if (!priv->port_up) {
+               en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
+               goto out;
+       }
+
+       if (!netif_carrier_ok(dev)) {
+               if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+                       if (priv->port_state.link_state) {
+                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+                               netif_carrier_on(dev);
+                               en_dbg(LINK, priv, "Link Up\n");
+                       }
+               }
+       }
+
+       if (dev->priv_flags & IFF_UNICAST_FLT)
+               mlx4_en_do_uc_filter(priv, dev, mdev);
+
+       /* Promsicuous mode: disable all filters */
+       if ((dev->flags & IFF_PROMISC) ||
+           (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
+               mlx4_en_set_promisc_mode(priv, mdev);
+               goto out;
+       }
+
+       /* Not in promiscuous mode */
+       if (priv->flags & MLX4_EN_FLAG_PROMISC)
+               mlx4_en_clear_promisc_mode(priv, mdev);
+
+       mlx4_en_do_multicast(priv, dev, mdev);
 out:
        mutex_unlock(&mdev->state_lock);
 }
@@ -876,9 +1237,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
        priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
        priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
        priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
-       en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
-                          "rx_frames:%d rx_usecs:%d\n",
-                priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+       en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+              priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
 
        /* Setup cq moderation params */
        for (i = 0; i < priv->rx_ring_num; i++) {
@@ -959,8 +1319,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
                        cq->moder_time = moder_time;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err)
-                               en_err(priv, "Failed modifying moderation "
-                                            "for cq:%d\n", ring);
+                               en_err(priv, "Failed modifying moderation for cq:%d\n",
+                                      ring);
                }
                priv->last_moder_packets[ring] = rx_packets;
                priv->last_moder_bytes[ring] = rx_bytes;
@@ -1077,8 +1437,7 @@ int mlx4_en_start_port(struct net_device *dev)
 
        /* Set qp number */
        en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
-       err = mlx4_get_eth_qp(mdev->dev, priv->port,
-                               priv->mac, &priv->base_qpn);
+       err = mlx4_en_get_qp(priv);
        if (err) {
                en_err(priv, "Failed getting eth qp\n");
                goto cq_err;
@@ -1141,8 +1500,8 @@ int mlx4_en_start_port(struct net_device *dev)
                                    priv->prof->rx_pause,
                                    priv->prof->rx_ppp);
        if (err) {
-               en_err(priv, "Failed setting port general configurations "
-                            "for port %d, with error %d\n", priv->port, err);
+               en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+                      priv->port, err);
                goto tx_err;
        }
        /* Set default qp number */
@@ -1172,7 +1531,7 @@ int mlx4_en_start_port(struct net_device *dev)
        priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
 
        /* Schedule multicast task to populate multicast list */
-       queue_work(mdev->workqueue, &priv->mcast_task);
+       queue_work(mdev->workqueue, &priv->rx_mode_task);
 
        mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
 
@@ -1191,7 +1550,7 @@ tx_err:
 rss_err:
        mlx4_en_release_rss_steer(priv);
 mac_err:
-       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+       mlx4_en_put_qp(priv);
 cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -1290,7 +1649,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        mlx4_en_release_rss_steer(priv);
 
        /* Unregister Mac address for the port */
-       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+       mlx4_en_put_qp(priv);
        if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;
 
@@ -1323,15 +1682,12 @@ static void mlx4_en_restart(struct work_struct *work)
                                                 watchdog_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;
-       int i;
 
        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev, 1);
-               for (i = 0; i < priv->tx_ring_num; i++)
-                       netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
@@ -1563,17 +1919,92 @@ static int mlx4_en_set_features(struct net_device *netdev,
                priv->ctrl_flags &=
                        cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
 
+       mlx4_en_update_loopback_state(netdev, features);
+
        return 0;
 
 }
 
+static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                          struct net_device *dev,
+                          const unsigned char *addr, u16 flags)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_dev *mdev = priv->mdev->dev;
+       int err;
+
+       if (!mlx4_is_mfunc(mdev))
+               return -EOPNOTSUPP;
+
+       /* Hardware does not support aging addresses, allow only
+        * permanent addresses if ndm_state is given
+        */
+       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+               en_info(priv, "Add FDB only supports static addresses\n");
+               return -EINVAL;
+       }
+
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_add_excl(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_add_excl(dev, addr);
+       else
+               err = -EINVAL;
+
+       /* Only return duplicate errors if NLM_F_EXCL is set */
+       if (err == -EEXIST && !(flags & NLM_F_EXCL))
+               err = 0;
+
+       return err;
+}
+
+static int mlx4_en_fdb_del(struct ndmsg *ndm,
+                          struct nlattr *tb[],
+                          struct net_device *dev,
+                          const unsigned char *addr)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_dev *mdev = priv->mdev->dev;
+       int err;
+
+       if (!mlx4_is_mfunc(mdev))
+               return -EOPNOTSUPP;
+
+       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+               en_info(priv, "Del FDB only supports static addresses\n");
+               return -EINVAL;
+       }
+
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_del(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_del(dev, addr);
+       else
+               err = -EINVAL;
+
+       return err;
+}
+
+static int mlx4_en_fdb_dump(struct sk_buff *skb,
+                           struct netlink_callback *cb,
+                           struct net_device *dev, int idx)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_dev *mdev = priv->mdev->dev;
+
+       if (mlx4_is_mfunc(mdev))
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+       return idx;
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
        .ndo_get_stats          = mlx4_en_get_stats,
-       .ndo_set_rx_mode        = mlx4_en_set_multicast,
+       .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = mlx4_en_change_mtu,
@@ -1588,6 +2019,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
+       .ndo_fdb_add            = mlx4_en_fdb_add,
+       .ndo_fdb_del            = mlx4_en_fdb_del,
+       .ndo_fdb_dump           = mlx4_en_fdb_dump,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1644,7 +2078,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->mac_index = -1;
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
        spin_lock_init(&priv->stats_lock);
-       INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+       INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
        INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
@@ -1654,16 +2088,24 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
 #endif
 
+       for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
+               INIT_HLIST_HEAD(&priv->mac_hash[i]);
+
        /* Query for default mac and max mtu */
        priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
-       priv->mac = mdev->dev->caps.def_mac[priv->port];
-       if (ILLEGAL_MAC(priv->mac)) {
-               en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
-                        priv->port, priv->mac);
+
+       /* Set default MAC */
+       dev->addr_len = ETH_ALEN;
+       mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+       if (!is_valid_ether_addr(dev->dev_addr)) {
+               en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+                      priv->port, dev->dev_addr);
                err = -EINVAL;
                goto out;
        }
 
+       memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+
        priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
        err = mlx4_en_alloc_resources(priv);
@@ -1694,11 +2136,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
        SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
 
-       /* Set defualt MAC */
-       dev->addr_len = ETH_ALEN;
-       for (i = 0; i < ETH_ALEN; i++)
-               dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
-
        /*
         * Set driver features
         */
@@ -1718,6 +2155,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                dev->hw_features |= NETIF_F_NTUPLE;
 
+       if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+               dev->priv_flags |= IFF_UNICAST_FLT;
+
        mdev->pndev[port] = dev;
 
        netif_carrier_off(dev);
@@ -1731,6 +2171,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
+       mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
        /* Configure port */
        mlx4_en_calc_rx_buf(dev);
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
index fed26d8..ce38654 100644 (file)
@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        unsigned int length;
        int polled = 0;
        int ip_summed;
-       struct ethhdr *ethh;
-       dma_addr_t dma;
-       u64 s_mac;
        int factor = priv->cqe_factor;
 
        if (!priv->port_up)
@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        goto next;
                }
 
-               /* Get pointer to first fragment since we haven't skb yet and
-                * cast it to ethhdr struct */
-               dma = be64_to_cpu(rx_desc->data[0].addr);
-               dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
-                                       DMA_FROM_DEVICE);
-               ethh = (struct ethhdr *)(page_address(frags[0].page) +
-                                        frags[0].offset);
-               s_mac = mlx4_en_mac_to_u64(ethh->h_source);
-
-               /* If source MAC is equal to our own MAC and not performing
-                * the selftest or flb disabled - drop the packet */
-               if (s_mac == priv->mac &&
-                   !((dev->features & NETIF_F_LOOPBACK) ||
-                     priv->validate_loopback))
-                       goto next;
+               /* Check if we need to drop the packet if SRIOV is not enabled
+                * and not performing the selftest or flb disabled
+                */
+               if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
+                       struct ethhdr *ethh;
+                       dma_addr_t dma;
+                       /* Get pointer to first fragment since we haven't
+                        * skb yet and cast it to ethhdr struct
+                        */
+                       dma = be64_to_cpu(rx_desc->data[0].addr);
+                       dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+                                               DMA_FROM_DEVICE);
+                       ethh = (struct ethhdr *)(page_address(frags[0].page) +
+                                                frags[0].offset);
+
+                       if (is_multicast_ether_addr(ethh->h_dest)) {
+                               struct mlx4_mac_entry *entry;
+                               struct hlist_node *n;
+                               struct hlist_head *bucket;
+                               unsigned int mac_hash;
+
+                               /* Drop the packet, since HW loopback-ed it */
+                               mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
+                               bucket = &priv->mac_hash[mac_hash];
+                               rcu_read_lock();
+                               hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+                                       if (ether_addr_equal_64bits(entry->mac,
+                                                                   ethh->h_source)) {
+                                               rcu_read_unlock();
+                                               goto next;
+                                       }
+                               }
+                               rcu_read_unlock();
+                       }
+               }
 
                /*
                 * Packet is OK - process it.
@@ -835,11 +852,9 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
        struct mlx4_qp_context *context;
        int err = 0;
 
-       context = kmalloc(sizeof *context , GFP_KERNEL);
-       if (!context) {
-               en_err(priv, "Failed to allocate qp context\n");
+       context = kmalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
                return -ENOMEM;
-       }
 
        err = mlx4_qp_alloc(mdev->dev, qpn, qp);
        if (err) {
index bf2e5d3..3488c6d 100644 (file)
@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
         priv->loopback_ok = 0;
        priv->validate_loopback = 1;
 
+       mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
        /* xmit */
        if (mlx4_en_test_loopback_xmit(priv)) {
                en_err(priv, "Transmitting loopback packet failed\n");
@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 mlx4_en_test_loopback_exit:
 
        priv->validate_loopback = 0;
+       mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
        return !loopback_ok;
 }
 
index 30724d8..49308cc 100644 (file)
@@ -294,6 +294,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
                cnt++;
        }
 
+       netdev_tx_reset_queue(ring->tx_queue);
+
        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
 
@@ -640,7 +642,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->tx_csum++;
        }
 
-       if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+       if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
                /* Copy dst mac address to wqe. This allows loopback in eSwitch,
                 * so that VFs and PF can communicate with each other
                 */
index 12ddae6..b9dde13 100644 (file)
@@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
-       if ((dev_cap->flags &
+       if ((dev->caps.flags &
            (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
            mlx4_is_master(dev))
                dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
@@ -1833,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
        info->dev = dev;
        info->port = port;
        if (!mlx4_is_slave(dev)) {
-               INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
                mlx4_init_mac_table(dev, &info->mac_table);
                mlx4_init_vlan_table(dev, &info->vlan_table);
-               info->base_qpn =
-                       dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
-                       (port - 1) * (1 << log_num_mac);
+               info->base_qpn = mlx4_get_base_qpn(dev, port);
        }
 
        sprintf(info->dev_name, "mlx4_port%d", port);
index 172daaa..ed4a695 100644 (file)
@@ -653,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
        __be32 mcast;
 };
 
-struct mlx4_mac_entry {
-       u64 mac;
-       u64 reg_id;
-};
-
 struct mlx4_port_info {
        struct mlx4_dev        *dev;
        int                     port;
@@ -667,7 +662,6 @@ struct mlx4_port_info {
        char                    dev_mtu_name[16];
        struct device_attribute port_mtu_attr;
        struct mlx4_mac_table   mac_table;
-       struct radix_tree_root  mac_tree;
        struct mlx4_vlan_table  vlan_table;
        int                     base_qpn;
 };
@@ -916,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
 void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
-int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                     int start_index, int npages, u64 *page_list);
 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
index 43f0165..c313d7e 100644 (file)
@@ -198,7 +198,6 @@ enum cq_type {
  */
 #define ROUNDUP_LOG2(x)                ilog2(roundup_pow_of_two(x))
 #define XNOR(x, y)             (!(x) == !(y))
-#define ILLEGAL_MAC(addr)      (addr == 0xffffffffffffULL || addr == 0x0)
 
 
 struct mlx4_en_tx_info {
@@ -432,6 +431,21 @@ struct ethtool_flow_id {
        u64 id;
 };
 
+enum {
+       MLX4_EN_FLAG_PROMISC            = (1 << 0),
+       MLX4_EN_FLAG_MC_PROMISC         = (1 << 1),
+       /* whether we need to enable hardware loopback by putting dmac
+        * in Tx WQE
+        */
+       MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
+       /* whether we need to drop packets that hardware loopback-ed */
+       MLX4_EN_FLAG_RX_FILTER_NEEDED   = (1 << 3),
+       MLX4_EN_FLAG_FORCE_PROMISC      = (1 << 4)
+};
+
+#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
+#define MLX4_EN_MAC_HASH_IDX 5
+
 struct mlx4_en_priv {
        struct mlx4_en_dev *mdev;
        struct mlx4_en_port_profile *prof;
@@ -472,7 +486,7 @@ struct mlx4_en_priv {
        int registered;
        int allocated;
        int stride;
-       u64 mac;
+       unsigned char prev_mac[ETH_ALEN + 2];
        int mac_index;
        unsigned max_mtu;
        int base_qpn;
@@ -481,8 +495,6 @@ struct mlx4_en_priv {
        struct mlx4_en_rss_map rss_map;
        __be32 ctrl_flags;
        u32 flags;
-#define MLX4_EN_FLAG_PROMISC   0x1
-#define MLX4_EN_FLAG_MC_PROMISC        0x2
        u8 num_tx_rings_p_up;
        u32 tx_ring_num;
        u32 rx_ring_num;
@@ -496,7 +508,7 @@ struct mlx4_en_priv {
        struct mlx4_en_cq *tx_cq;
        struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
        struct mlx4_qp drop_qp;
-       struct work_struct mcast_task;
+       struct work_struct rx_mode_task;
        struct work_struct mac_task;
        struct work_struct watchdog_task;
        struct work_struct linkstate_task;
@@ -513,6 +525,7 @@ struct mlx4_en_priv {
        bool wol;
        struct device *ddev;
        int base_tx_qpn;
+       struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
 
 #ifdef CONFIG_MLX4_EN_DCB
        struct ieee_ets ets;
@@ -532,8 +545,18 @@ enum mlx4_en_wol {
        MLX4_EN_WOL_ENABLED = (1ULL << 62),
 };
 
+struct mlx4_mac_entry {
+       struct hlist_node hlist;
+       unsigned char mac[ETH_ALEN + 2];
+       u64 reg_id;
+       struct rcu_head rcu;
+};
+
 #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
+void mlx4_en_update_loopback_state(struct net_device *dev,
+                                  netdev_features_t features);
+
 void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof);
index 4c51b05..719ead1 100644 (file)
@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
        table->total = 0;
 }
 
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
-                            u64 mac, int *qpn, u64 *reg_id)
-{
-       __be64 be_mac;
-       int err;
-
-       mac &= MLX4_MAC_MASK;
-       be_mac = cpu_to_be64(mac << 16);
-
-       switch (dev->caps.steering_mode) {
-       case MLX4_STEERING_MODE_B0: {
-               struct mlx4_qp qp;
-               u8 gid[16] = {0};
-
-               qp.qpn = *qpn;
-               memcpy(&gid[10], &be_mac, ETH_ALEN);
-               gid[5] = port;
-
-               err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
-               break;
-       }
-       case MLX4_STEERING_MODE_DEVICE_MANAGED: {
-               struct mlx4_spec_list spec_eth = { {NULL} };
-               __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
-
-               struct mlx4_net_trans_rule rule = {
-                       .queue_mode = MLX4_NET_TRANS_Q_FIFO,
-                       .exclusive = 0,
-                       .allow_loopback = 1,
-                       .promisc_mode = MLX4_FS_PROMISC_NONE,
-                       .priority = MLX4_DOMAIN_NIC,
-               };
-
-               rule.port = port;
-               rule.qpn = *qpn;
-               INIT_LIST_HEAD(&rule.list);
-
-               spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
-               memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
-               memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-               list_add_tail(&spec_eth.list, &rule.list);
-
-               err = mlx4_flow_attach(dev, &rule, reg_id);
-               break;
-       }
-       default:
-               return -EINVAL;
-       }
-       if (err)
-               mlx4_warn(dev, "Failed Attaching Unicast\n");
-
-       return err;
-}
-
-static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-                                 u64 mac, int qpn, u64 reg_id)
-{
-       switch (dev->caps.steering_mode) {
-       case MLX4_STEERING_MODE_B0: {
-               struct mlx4_qp qp;
-               u8 gid[16] = {0};
-               __be64 be_mac;
-
-               qp.qpn = qpn;
-               mac &= MLX4_MAC_MASK;
-               be_mac = cpu_to_be64(mac << 16);
-               memcpy(&gid[10], &be_mac, ETH_ALEN);
-               gid[5] = port;
-
-               mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
-               break;
-       }
-       case MLX4_STEERING_MODE_DEVICE_MANAGED: {
-               mlx4_flow_detach(dev, reg_id);
-               break;
-       }
-       default:
-               mlx4_err(dev, "Invalid steering mode.\n");
-       }
-}
-
 static int validate_index(struct mlx4_dev *dev,
                          struct mlx4_mac_table *table, int index)
 {
@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
        return -EINVAL;
 }
 
-int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
-{
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_entry *entry;
-       int index = 0;
-       int err = 0;
-       u64 reg_id;
-
-       mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
-                       (unsigned long long) mac);
-       index = mlx4_register_mac(dev, port, mac);
-       if (index < 0) {
-               err = index;
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
-                        (unsigned long long) mac);
-               return err;
-       }
-
-       if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
-               *qpn = info->base_qpn + index;
-               return 0;
-       }
-
-       err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
-       mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
-       if (err) {
-               mlx4_err(dev, "Failed to reserve qp for mac registration\n");
-               goto qp_err;
-       }
-
-       err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
-       if (err)
-               goto steer_err;
-
-       entry = kmalloc(sizeof *entry, GFP_KERNEL);
-       if (!entry) {
-               err = -ENOMEM;
-               goto alloc_err;
-       }
-       entry->mac = mac;
-       entry->reg_id = reg_id;
-       err = radix_tree_insert(&info->mac_tree, *qpn, entry);
-       if (err)
-               goto insert_err;
-       return 0;
-
-insert_err:
-       kfree(entry);
-
-alloc_err:
-       mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
-
-steer_err:
-       mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
-       mlx4_unregister_mac(dev, port, mac);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
-
-void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
-{
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_entry *entry;
-
-       mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
-                (unsigned long long) mac);
-       mlx4_unregister_mac(dev, port, mac);
-
-       if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (entry) {
-                       mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
-                                " qpn %d\n", port,
-                                (unsigned long long) mac, qpn);
-                       mlx4_uc_steer_release(dev, port, entry->mac,
-                                             qpn, entry->reg_id);
-                       mlx4_qp_release_range(dev, qpn, 1);
-                       radix_tree_delete(&info->mac_tree, qpn);
-                       kfree(entry);
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
-
 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
                                   __be64 *entries)
 {
@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 }
 EXPORT_SYMBOL_GPL(mlx4_register_mac);
 
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
+{
+       return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+                       (port - 1) * (1 << dev->caps.log_num_macs);
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
 
 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
 
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       struct mlx4_mac_entry *entry;
        int index = qpn - info->base_qpn;
        int err = 0;
 
-       if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (!entry)
-                       return -EINVAL;
-               mlx4_uc_steer_release(dev, port, entry->mac,
-                                     qpn, entry->reg_id);
-               mlx4_unregister_mac(dev, port, entry->mac);
-               entry->mac = new_mac;
-               entry->reg_id = 0;
-               mlx4_register_mac(dev, port, new_mac);
-               err = mlx4_uc_steer_add(dev, port, entry->mac,
-                                       &qpn, &entry->reg_id);
-               return err;
-       }
-
        /* CX1 doesn't support multi-functions */
        mutex_lock(&table->mutex);
 
@@ -439,7 +262,7 @@ out:
        mutex_unlock(&table->mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
 
 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
                                    __be32 *entries)
index db50598..4782dcf 100644 (file)
@@ -201,7 +201,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 
        struct netxen_cmd_buffer *cmd_buf_arr;
        struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
 
        tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
        if (tx_ring == NULL)
index 457ca8e..61b594c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
@@ -1442,7 +1442,9 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
 void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
 
 /* Functions from qlcnic_ethtool.c */
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+int qlcnic_loopback_test(struct net_device *, u8);
 
 /* Functions from qlcnic_main.c */
 int qlcnic_reset_context(struct qlcnic_adapter *);
index 65233c8..f7b39d1 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #include "qlcnic.h"
 #include <linux/if_vlan.h>
 #include <linux/ipv6.h>
@@ -5,13 +12,6 @@
 #include <linux/interrupt.h>
 
 #define QLCNIC_MAX_TX_QUEUES           1
-
-#define QLCNIC_MBX_RSP(reg)            LSW(reg)
-#define QLCNIC_MBX_NUM_REGS(reg)       (MSW(reg) & 0x1FF)
-#define QLCNIC_MBX_STATUS(reg)         (((reg) >> 25) & 0x7F)
-#define QLCNIC_MBX_HOST(ahw, i)        ((ahw)->pci_base0 + ((i) * 4))
-#define QLCNIC_MBX_FW(ahw, i)          ((ahw)->pci_base0 + 0x800 + ((i) * 4))
-
 #define RSS_HASHTYPE_IP_TCP            0x3
 
 /* status descriptor mailbox data
@@ -257,8 +257,6 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .config_intr_coal               = qlcnic_83xx_config_intr_coal,
        .config_rss                     = qlcnic_83xx_config_rss,
        .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
-       .config_loopback                = qlcnic_83xx_set_lb_mode,
-       .clear_loopback                 = qlcnic_83xx_clear_lb_mode,
        .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
        .change_l2_filter               = qlcnic_83xx_change_l2_filter,
        .get_board_info                 = qlcnic_83xx_get_port_info,
@@ -666,6 +664,21 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
        pr_info("\n");
 }
 
+/* Mailbox response for mac rcode */
+static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+{
+       u32 fw_data;
+       u8 mac_cmd_rcode;
+
+       fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+       mac_cmd_rcode = (u8)fw_data;
+       if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+           mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+           mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
+               return QLCNIC_RCODE_SUCCESS;
+       return 1;
+}
+
 static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
 {
        u32 data;
@@ -688,8 +701,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
 {
        int i;
        u16 opcode;
-       u8 mbx_err_code, mac_cmd_rcode;
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, temp, fw[8];
+       u8 mbx_err_code;
+       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 
        opcode = LSW(cmd->req.arg[0]);
@@ -724,79 +737,44 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
        QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 poll:
        rsp = qlcnic_83xx_mbx_poll(adapter);
-       /* Get the FW response data */
-       fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
-       mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
-       rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
-       opcode = QLCNIC_MBX_RSP(fw_data);
-
        if (rsp != QLCNIC_RCODE_TIMEOUT) {
-               if (opcode == QLCNIC_MBX_LINK_EVENT) {
-                       for (i = 0; i < rsp_num; i++) {
-                               temp = readl(QLCNIC_MBX_FW(ahw, i));
-                               fw[i] = temp;
-                       }
-                       qlcnic_83xx_handle_link_aen(adapter, fw);
-                       /* clear fw mbx control register */
-                       QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-                       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-                       if (mbx_val)
-                               goto poll;
-               } else if (opcode == QLCNIC_MBX_COMP_EVENT) {
-                       for (i = 0; i < rsp_num; i++) {
-                               temp = readl(QLCNIC_MBX_FW(ahw, i));
-                               fw[i] = temp;
-                       }
-                       qlcnic_83xx_handle_idc_comp_aen(adapter, fw);
-                       /* clear fw mbx control register */
-                       QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-                       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-                       if (mbx_val)
-                               goto poll;
-               } else if (opcode == QLCNIC_MBX_REQUEST_EVENT) {
-                       /* IDC Request Notification */
-                       for (i = 0; i < rsp_num; i++) {
-                               temp = readl(QLCNIC_MBX_FW(ahw, i));
-                               fw[i] = temp;
-                       }
-                       for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) {
-                               temp = QLCNIC_MBX_RSP(fw[i]);
-                               adapter->ahw->mbox_aen[i] = temp;
-                       }
-                       queue_delayed_work(adapter->qlcnic_wq,
-                                          &adapter->idc_aen_work, 0);
-                       /* clear fw mbx control register */
-                       QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+               /* Get the FW response data */
+               fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+               if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
+                       qlcnic_83xx_process_aen(adapter);
                        mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
                        if (mbx_val)
                                goto poll;
-               } else if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
-                          (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
-                       qlcnic_83xx_get_mbx_data(adapter, cmd);
+               }
+               mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+               rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+               opcode = QLCNIC_MBX_RSP(fw_data);
+               qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+               switch (mbx_err_code) {
+               case QLCNIC_MBX_RSP_OK:
+               case QLCNIC_MBX_PORT_RSP_OK:
                        rsp = QLCNIC_RCODE_SUCCESS;
-               } else {
-                       qlcnic_83xx_get_mbx_data(adapter, cmd);
+                       break;
+               default:
                        if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
-                               fw_data = readl(QLCNIC_MBX_FW(ahw, 2));
-                               mac_cmd_rcode = (u8)fw_data;
-                               if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
-                                   mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
-                                   mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
-                                       rsp = QLCNIC_RCODE_SUCCESS;
+                               rsp = qlcnic_83xx_mac_rcode(adapter);
+                               if (!rsp)
                                        goto out;
-                               }
                        }
-                       dev_info(&adapter->pdev->dev,
-                                "MBX command 0x%x failed with err:0x%x\n",
-                                opcode, mbx_err_code);
+                       dev_err(&adapter->pdev->dev,
+                               "MBX command 0x%x failed with err:0x%x\n",
+                               opcode, mbx_err_code);
                        rsp = mbx_err_code;
                        qlcnic_dump_mbx(adapter, cmd);
+                       break;
                }
-       } else {
-               dev_info(&adapter->pdev->dev,
-                        "MBX command 0x%x timed out\n", opcode);
-               qlcnic_dump_mbx(adapter, cmd);
+               goto out;
        }
+
+       dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+               QLCNIC_MBX_RSP(mbx_cmd));
+       rsp = QLCNIC_RCODE_TIMEOUT;
 out:
        /* clear fw mbx control register */
        QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
@@ -868,20 +846,10 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
 
 void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 {
-       u32 mask, resp, event[QLC_83XX_MBX_AEN_CNT];
+       u32 event[QLC_83XX_MBX_AEN_CNT];
        int i;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 
-       if (!spin_trylock(&ahw->mbx_lock)) {
-               mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
-               writel(0, adapter->ahw->pci_base0 + mask);
-               return;
-       }
-       resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-
-       if (!(resp & QLCNIC_SET_OWNER))
-               goto out;
-
        for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
                event[i] = readl(QLCNIC_MBX_FW(ahw, i));
 
@@ -916,10 +884,6 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
        }
 
        QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-out:
-       mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
-       writel(0, adapter->ahw->pci_base0 + mask);
-       spin_unlock(&ahw->mbx_lock);
 }
 
 static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
@@ -1166,6 +1130,100 @@ out:
        return err;
 }
 
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_rds_ring *rds_ring;
+       u8 ring;
+       int ret;
+
+       netif_device_detach(netdev);
+
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+       qlcnic_detach(adapter);
+
+       adapter->max_sds_rings = 1;
+       adapter->ahw->diag_test = test;
+       adapter->ahw->linkup = 0;
+
+       ret = qlcnic_attach(adapter);
+       if (ret) {
+               netif_device_attach(netdev);
+               return ret;
+       }
+
+       ret = qlcnic_fw_create_ctx(adapter);
+       if (ret) {
+               qlcnic_detach(adapter);
+               netif_device_attach(netdev);
+               return ret;
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+               qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+       }
+
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+                       sds_ring = &adapter->recv_ctx->sds_rings[ring];
+                       qlcnic_83xx_enable_intr(adapter, sds_ring);
+               }
+       }
+
+       if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+               /* disable and free mailbox interrupt */
+               qlcnic_83xx_free_mbx_intr(adapter);
+               adapter->ahw->loopback_state = 0;
+               adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+       }
+
+       set_bit(__QLCNIC_DEV_UP, &adapter->state);
+       return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+                                       int max_sds_rings)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       int ring, err;
+
+       clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+                       sds_ring = &adapter->recv_ctx->sds_rings[ring];
+                       writel(1, sds_ring->crb_intr_mask);
+               }
+       }
+
+       qlcnic_fw_destroy_ctx(adapter);
+       qlcnic_detach(adapter);
+
+       if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+               err = qlcnic_83xx_setup_mbx_intr(adapter);
+               if (err) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed to setup mbx interrupt\n",
+                               __func__);
+                       goto out;
+               }
+       }
+       adapter->ahw->diag_test = 0;
+       adapter->max_sds_rings = max_sds_rings;
+
+       if (qlcnic_attach(adapter))
+               goto out;
+
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+out:
+       netif_device_attach(netdev);
+}
+
 int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
                           u32 beacon)
 {
@@ -1232,10 +1290,10 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
 
        if (enable) {
                qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
-               cmd.req.arg[1] = 1 | BIT_0;
+               cmd.req.arg[1] = BIT_0 | BIT_31;
        } else {
                qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
-               cmd.req.arg[1] = 0 | BIT_0;
+               cmd.req.arg[1] = BIT_0 | BIT_31;
        }
        status = qlcnic_issue_cmd(adapter, &cmd);
        if (status)
@@ -1313,6 +1371,57 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
        return err;
 }
 
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+
+       QLCDB(adapter, DRV, "%s loopback test in progress\n",
+             mode == QLCNIC_ILB_MODE ? "internal" : "external");
+       if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               dev_warn(&adapter->pdev->dev,
+                        "Loopback test not supported for non privilege function\n");
+               return ret;
+       }
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+       if (ret)
+               goto fail_diag_alloc;
+
+       ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+       if (ret)
+               goto free_diag_res;
+
+       /* Poll for link up event before running traffic */
+       do {
+               msleep(500);
+               qlcnic_83xx_process_aen(adapter);
+               if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+                       dev_info(&adapter->pdev->dev,
+                                "Firmware didn't sent link up event to loopback request\n");
+                       ret = -QLCNIC_FW_NOT_RESPOND;
+                       qlcnic_83xx_clear_lb_mode(adapter, mode);
+                       goto free_diag_res;
+               }
+       } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+       ret = qlcnic_do_lb_test(adapter, mode);
+
+       qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+       qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_alloc:
+       adapter->max_sds_rings = max_sds_rings;
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return ret;
+}
+
 int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1341,13 +1450,15 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
                return status;
        }
 
-       /* Wait until firmware send IDC Completion AEN */
+       /* Wait for Link and IDC Completion AEN */
        do {
                msleep(300);
+               qlcnic_83xx_process_aen(adapter);
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
                        dev_err(&adapter->pdev->dev,
                                "FW did not generate IDC completion AEN\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       qlcnic_83xx_clear_lb_mode(adapter, mode);
                        return -EIO;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1379,9 +1490,10 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
                return status;
        }
 
-       /* Wait until firmware send IDC Completion AEN */
+       /* Wait for Link and IDC Completion AEN */
        do {
                msleep(300);
+               qlcnic_83xx_process_aen(adapter);
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
                        dev_err(&adapter->pdev->dev,
                                "Firmware didn't sent IDC completion AEN\n");
@@ -1613,7 +1725,21 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
 irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
 {
        struct qlcnic_adapter *adapter = data;
-       qlcnic_83xx_process_aen(adapter);
+       unsigned long flags;
+       u32 mask, resp, event;
+
+       spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+       resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+       if (!(resp & QLCNIC_SET_OWNER))
+               goto out;
+       event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+       if (event &  QLCNIC_MBX_ASYNC_EVENT)
+               qlcnic_83xx_process_aen(adapter);
+out:
+       mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+       writel(0, adapter->ahw->pci_base0 + mask);
+       spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+
        return IRQ_HANDLED;
 }
 
@@ -2635,25 +2761,37 @@ int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
        return i;
 }
 
-int qlcnic_83xx_interrupt_test(struct qlcnic_adapter *adapter,
-                              struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 {
-       u8 val;
-       int ret;
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_cmd_args cmd;
        u32 data;
        u16 intrpt_id, id;
+       u8 val;
+       int ret, max_sds_rings = adapter->max_sds_rings;
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EIO;
+
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+       if (ret)
+               goto fail_diag_irq;
+
+       ahw->diag_cnt = 0;
+       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
 
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
-               intrpt_id = adapter->ahw->intr_tbl[0].id;
+               intrpt_id = ahw->intr_tbl[0].id;
        else
-               intrpt_id = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_ID);
+               intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
 
-       cmd->req.arg[1] = 1;
-       cmd->req.arg[2] = intrpt_id;
-       cmd->req.arg[3] = BIT_0;
+       cmd.req.arg[1] = 1;
+       cmd.req.arg[2] = intrpt_id;
+       cmd.req.arg[3] = BIT_0;
 
-       ret = qlcnic_issue_cmd(adapter, cmd);
-       data = cmd->rsp.arg[2];
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       data = cmd.rsp.arg[2];
        id = LSW(data);
        val = LSB(MSW(data));
        if (id != intrpt_id)
@@ -2661,9 +2799,21 @@ int qlcnic_83xx_interrupt_test(struct qlcnic_adapter *adapter,
                         "Interrupt generated: 0x%x, requested:0x%x\n",
                         id, intrpt_id);
        if (val)
-               dev_info(&adapter->pdev->dev,
+               dev_err(&adapter->pdev->dev,
                         "Interrupt test error: 0x%x\n", val);
+       if (ret)
+               goto done;
+
+       msleep(20);
+       ret = !ahw->diag_cnt;
+
+done:
+       qlcnic_free_mbx_args(&cmd);
+       qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
 
+fail_diag_irq:
+       adapter->max_sds_rings = max_sds_rings;
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return ret;
 }
 
index 2b44eb1..f60e28a 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #ifndef __QLCNIC_83XX_HW_H
 #define __QLCNIC_83XX_HW_H
 
@@ -130,9 +137,6 @@ struct qlc_83xx_reset {
 #define QLC_83XX_IDC_MINOR_VERSION                     0
 #define QLC_83XX_IDC_FLASH_PARAM_ADDR                  0x3e8020
 
-/* Mailbox process AEN count */
-#define QLC_83XX_MBX_AEN_CNT 5
-
 struct qlcnic_adapter;
 struct qlc_83xx_idc {
        int (*state_entry) (struct qlcnic_adapter *);
@@ -149,6 +153,12 @@ struct qlc_83xx_idc {
        char            **name;
 };
 
+#define QLCNIC_MBX_RSP(reg)            LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg)       (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg)         (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i)        ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i)          ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
 /* Mailbox process AEN count */
 #define QLC_83XX_IDC_COMP_AEN                  3
 #define QLC_83XX_MBX_AEN_CNT                   5
@@ -418,7 +428,7 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *);
 int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
 int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
 int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
-int qlcnic_83xx_interrupt_test(struct qlcnic_adapter *,
-                              struct qlcnic_cmd_args *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
 int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 #endif
index 8163e5b..c53832b 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #include "qlcnic.h"
 #include "qlcnic_hw.h"
 
@@ -578,6 +585,9 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
 
 static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 {
+       /* register for NIC IDC AEN Events */
+       qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
        qlcnic_83xx_enable_mbx_intrpt(adapter);
        if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
                if (qlcnic_83xx_config_intrpt(adapter, 1)) {
@@ -2018,6 +2028,9 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
        set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_clear_function_resources(adapter);
 
+       /* register for NIC IDC AEN Events */
+       qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
        if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
                qlcnic_83xx_read_flash_mfg_id(adapter);
 
@@ -2034,9 +2047,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
 
        INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
 
-       /* register for NIC IDC AEN Events */
-       qlcnic_83xx_register_nic_idc_func(adapter, 1);
-
        /* Periodically monitor device status */
        qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
 
index b3ef33a..b0c3de9 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #include "qlcnic.h"
 #include "qlcnic_hw.h"
 
index 7372964..4a3bd64 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
index f65fd7b..5641f8e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
@@ -823,38 +823,36 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 static int qlcnic_irq_test(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       int max_sds_rings = adapter->max_sds_rings;
-       int ret;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_cmd_args cmd;
+       int ret, max_sds_rings = adapter->max_sds_rings;
+
+       if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_interrupt_test(netdev);
 
        if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EIO;
 
        ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
        if (ret)
-               goto clear_it;
+               goto clear_diag_irq;
 
-       adapter->ahw->diag_cnt = 0;
+       ahw->diag_cnt = 0;
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
 
-       if (qlcnic_83xx_check(adapter)) {
-               ret = qlcnic_83xx_interrupt_test(adapter, &cmd);
-       } else {
-               cmd.req.arg[1] = adapter->ahw->pci_func;
-               ret = qlcnic_issue_cmd(adapter, &cmd);
-       }
-
+       cmd.req.arg[1] = ahw->pci_func;
+       ret = qlcnic_issue_cmd(adapter, &cmd);
        if (ret)
                goto done;
 
        usleep_range(1000, 12000);
-       ret = !adapter->ahw->diag_cnt;
+       ret = !ahw->diag_cnt;
 
 done:
        qlcnic_free_mbx_args(&cmd);
        qlcnic_diag_free_res(netdev, max_sds_rings);
 
-clear_it:
+clear_diag_irq:
        adapter->max_sds_rings = max_sds_rings;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return ret;
@@ -883,7 +881,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
        return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
 }
 
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 {
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -925,7 +923,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
        return 0;
 }
 
-static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        int max_sds_rings = adapter->max_sds_rings;
@@ -935,13 +933,14 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
        int ret;
 
        if (qlcnic_83xx_check(adapter))
-               goto skip_cap;
+               return qlcnic_83xx_loopback_test(netdev, mode);
+
        if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
                dev_info(&adapter->pdev->dev,
                         "Firmware do not support loopback test\n");
                return -EOPNOTSUPP;
        }
-skip_cap:
+
        dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
                 mode == QLCNIC_ILB_MODE ? "internal" : "external");
        if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
@@ -962,9 +961,6 @@ skip_cap:
        if (ret)
                goto free_res;
 
-       if (qlcnic_83xx_check(adapter))
-               goto skip_fw_msg;
-
        ahw->diag_cnt = 0;
        do {
                msleep(500);
@@ -979,21 +975,9 @@ skip_cap:
                        goto free_res;
                }
        } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
-skip_fw_msg:
-       if (qlcnic_83xx_check(adapter)) {
-               /* wait until firmware report link up before running traffic */
-               loop = 0;
-               do {
-                       msleep(500);
-                       if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-                               dev_info(&adapter->pdev->dev,
-                                        "No linkup event after LB req\n");
-                               ret = -QLCNIC_FW_NOT_RESPOND;
-                               goto free_res;
-                       }
-               } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
-       }
+
        ret = qlcnic_do_lb_test(adapter, mode);
+
        qlcnic_clear_lb_mode(adapter, mode);
 
  free_res:
index 74f7711..44197ca 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
index 4ee92b2..51716ab 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
index 9673e2b..5b8749e 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #ifndef __QLCNIC_HW_H
 #define __QLCNIC_HW_H
 
@@ -128,6 +135,7 @@ struct qlcnic_mailbox_metadata {
 
 #define QLCNIC_MBX_RSP_OK      1
 #define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
 
 struct qlcnic_pci_info;
 struct qlcnic_info;
index 10ad25d..d28336f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
index d00f628..93839f8 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
@@ -1036,8 +1043,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        th->seq = htonl(seq_number);
        length = skb->len;
 
-       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
                skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+               else
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       }
 
        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, vid);
@@ -1546,6 +1558,24 @@ skip:
        return count;
 }
 
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+       unsigned long flags;
+       u32 mask, resp, event;
+
+       spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+       resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+       if (!(resp & QLCNIC_SET_OWNER))
+               goto out;
+       event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+       if (event &  QLCNIC_MBX_ASYNC_EVENT)
+               qlcnic_83xx_process_aen(adapter);
+out:
+       mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+       writel(0, adapter->ahw->pci_base0 + mask);
+       spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+}
+
 static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
 {
        int tx_complete;
@@ -1560,7 +1590,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
        tx_ring = adapter->tx_ring;
 
        if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-               qlcnic_83xx_process_aen(adapter);
+               qlcnic_83xx_poll_process_aen(adapter);
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
index fb0255a..b953168 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic qlcnic NIC Driver
- * Copyright (c)  2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
  *
  * See LICENSE.qlcnic for copyright and licensing details.
  */
@@ -247,8 +247,8 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
-static int qlcnic_fdb_del(struct ndmsg *ndm, struct net_device *netdev,
-                       const unsigned char *addr)
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                       struct net_device *netdev, const unsigned char *addr)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        int err = -EOPNOTSUPP;
@@ -455,12 +455,13 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 
                        if (num_msix) {
                                dev_info(&pdev->dev,
-                                        "Trying %d MSI-X interrupt vectors\n",
+                                        "Trying to allocate %d MSI-X interrupt vectors\n",
                                         num_msix);
                                goto enable_msix;
                        }
                } else {
-                       dev_info(&pdev->dev, "Failed to get %d vectors\n",
+                       dev_info(&pdev->dev,
+                                "Unable to allocate %d MSI-X interrupt vectors\n",
                                 num_msix);
                }
        }
@@ -1174,8 +1175,6 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
 
        qlcnic_dev_set_npar_ready(adapter);
 
-       if (qlcnic_83xx_check(adapter))
-               qlcnic_83xx_register_nic_idc_func(adapter, 1);
        return err;
 }
 
@@ -1505,10 +1504,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
        if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
-                       if (qlcnic_83xx_check(adapter))
-                               writel(1, sds_ring->crb_intr_mask);
-                       else
-                               qlcnic_disable_int(sds_ring);
+                       qlcnic_disable_int(sds_ring);
                }
        }
 
@@ -1601,10 +1597,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
        if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
-                       if (qlcnic_82xx_check(adapter))
-                               qlcnic_enable_int(sds_ring);
-                       else
-                               qlcnic_83xx_enable_intr(adapter, sds_ring);
+                       qlcnic_enable_int(sds_ring);
                }
        }
 
@@ -2025,6 +2018,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
                        qlcnic_83xx_config_intrpt(adapter, 0);
                qlcnic_83xx_free_mbx_intr(adapter);
+               qlcnic_83xx_register_nic_idc_func(adapter, 0);
+               cancel_delayed_work_sync(&adapter->idc_aen_work);
        }
 
        qlcnic_detach(adapter);
@@ -2182,10 +2177,6 @@ static int qlcnic_close(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        __qlcnic_down(adapter, netdev);
-       if (qlcnic_83xx_check(adapter)) {
-               qlcnic_83xx_register_nic_idc_func(adapter, 0);
-               cancel_delayed_work_sync(&adapter->idc_aen_work);
-       }
 
        return 0;
 }
@@ -3063,6 +3054,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
        }
 
        if (qlcnic_83xx_check(adapter)) {
+               /* register for NIC IDC AEN Events */
+               qlcnic_83xx_register_nic_idc_func(adapter, 1);
                err = qlcnic_83xx_setup_mbx_intr(adapter);
                if (err) {
                        dev_err(&adapter->pdev->dev,
@@ -3117,6 +3110,8 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
                        qlcnic_83xx_config_intrpt(adapter, 0);
                qlcnic_83xx_free_mbx_intr(adapter);
+               qlcnic_83xx_register_nic_idc_func(adapter, 0);
+               cancel_delayed_work_sync(&adapter->idc_aen_work);
        }
 
        qlcnic_detach(adapter);
@@ -3224,6 +3219,8 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
        }
 
        if (qlcnic_83xx_check(adapter)) {
+               /* register for NIC IDC AEN Events */
+               qlcnic_83xx_register_nic_idc_func(adapter, 1);
                err = qlcnic_83xx_setup_mbx_intr(adapter);
                if (err) {
                        dev_err(&adapter->pdev->dev,
index 8dbc8e7..abbd22c 100644 (file)
@@ -1,3 +1,9 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
 
 #include "qlcnic.h"
 #include "qlcnic_hdr.h"
index 5045063..987fb6f 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
index 325627e..b13ab54 100644 (file)
@@ -2920,14 +2920,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
                /*
                 * Allocate small buffer queue control blocks.
                 */
-               rx_ring->sbq =
-                   kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
-                           GFP_KERNEL);
-               if (rx_ring->sbq == NULL) {
-                       netif_err(qdev, ifup, qdev->ndev,
-                                 "Small buffer queue control block allocation failed.\n");
+               rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+                                            sizeof(struct bq_desc),
+                                            GFP_KERNEL);
+               if (rx_ring->sbq == NULL)
                        goto err_mem;
-               }
 
                ql_init_sbq_ring(qdev, rx_ring);
        }
@@ -2948,14 +2945,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
                /*
                 * Allocate large buffer queue control blocks.
                 */
-               rx_ring->lbq =
-                   kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
-                           GFP_KERNEL);
-               if (rx_ring->lbq == NULL) {
-                       netif_err(qdev, ifup, qdev->ndev,
-                                 "Large buffer queue control block allocation failed.\n");
+               rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+                                            sizeof(struct bq_desc),
+                                            GFP_KERNEL);
+               if (rx_ring->lbq == NULL)
                        goto err_mem;
-               }
 
                ql_init_lbq_ring(qdev, rx_ring);
        }
index 4208f28..8900398 100644 (file)
@@ -450,7 +450,6 @@ enum rtl8168_registers {
 #define PWM_EN                         (1 << 22)
 #define RXDV_GATED_EN                  (1 << 19)
 #define EARLY_TALLY_EN                 (1 << 16)
-#define FORCE_CLK                      (1 << 15) /* force clock request */
 };
 
 enum rtl_register_content {
@@ -514,7 +513,6 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
-       ClkReqEn        = (1 << 7),     /* Clock Request Enable */
        MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
@@ -535,7 +533,6 @@ enum rtl_register_content {
        Spi_en          = (1 << 3),
        LanWake         = (1 << 1),     /* LanWake enable/disable */
        PMEStatus       = (1 << 0),     /* PME status can be reset by PCI RST# */
-       ASPM_en         = (1 << 0),     /* ASPM enable */
 
        /* TBICSR p.28 */
        TBIReset        = 0x80000000,
@@ -684,7 +681,6 @@ enum features {
        RTL_FEATURE_WOL         = (1 << 0),
        RTL_FEATURE_MSI         = (1 << 1),
        RTL_FEATURE_GMII        = (1 << 2),
-       RTL_FEATURE_FW_LOADED   = (1 << 3),
 };
 
 struct rtl8169_counters {
@@ -2388,10 +2384,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
        struct rtl_fw *rtl_fw = tp->rtl_fw;
 
        /* TODO: release firmware once rtl_phy_write_fw signals failures. */
-       if (!IS_ERR_OR_NULL(rtl_fw)) {
+       if (!IS_ERR_OR_NULL(rtl_fw))
                rtl_phy_write_fw(tp, rtl_fw);
-               tp->features |= RTL_FEATURE_FW_LOADED;
-       }
 }
 
 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2402,31 +2396,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
                rtl_apply_firmware(tp);
 }
 
-static void r810x_aldps_disable(struct rtl8169_private *tp)
-{
-       rtl_writephy(tp, 0x1f, 0x0000);
-       rtl_writephy(tp, 0x18, 0x0310);
-       msleep(100);
-}
-
-static void r810x_aldps_enable(struct rtl8169_private *tp)
-{
-       if (!(tp->features & RTL_FEATURE_FW_LOADED))
-               return;
-
-       rtl_writephy(tp, 0x1f, 0x0000);
-       rtl_writephy(tp, 0x18, 0x8310);
-}
-
-static void r8168_aldps_enable_1(struct rtl8169_private *tp)
-{
-       if (!(tp->features & RTL_FEATURE_FW_LOADED))
-               return;
-
-       rtl_writephy(tp, 0x1f, 0x0000);
-       rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
-}
-
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 {
        static const struct phy_reg phy_reg_init[] = {
@@ -3217,8 +3186,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
        rtl_writephy(tp, 0x1f, 0x0000);
 
-       r8168_aldps_enable_1(tp);
-
        /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
        rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
 }
@@ -3293,8 +3260,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy(tp, 0x05, 0x8b85);
        rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
        rtl_writephy(tp, 0x1f, 0x0000);
-
-       r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3302,8 +3267,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
        rtl_apply_firmware(tp);
 
        rtl8168f_hw_phy_config(tp);
-
-       r8168_aldps_enable_1(tp);
 }
 
 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3401,8 +3364,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
        rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
        rtl_writephy(tp, 0x1f, 0x0000);
-
-       r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3488,19 +3449,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
        };
 
        /* Disable ALDPS before ram code */
-       r810x_aldps_disable(tp);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(100);
 
        rtl_apply_firmware(tp);
 
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
-
-       r810x_aldps_enable(tp);
 }
 
 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
 {
        /* Disable ALDPS before setting firmware */
-       r810x_aldps_disable(tp);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(20);
 
        rtl_apply_firmware(tp);
 
@@ -3510,8 +3473,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy(tp, 0x10, 0x401f);
        rtl_writephy(tp, 0x19, 0x7030);
        rtl_writephy(tp, 0x1f, 0x0000);
-
-       r810x_aldps_enable(tp);
 }
 
 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3524,7 +3485,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
        };
 
        /* Disable ALDPS before ram code */
-       r810x_aldps_disable(tp);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(100);
 
        rtl_apply_firmware(tp);
 
@@ -3532,8 +3495,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 
        rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
-       r810x_aldps_enable(tp);
 }
 
 static void rtl_hw_phy_config(struct net_device *dev)
@@ -5050,6 +5011,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, EarlySize);
 
+       rtl_disable_clock_request(pdev);
+
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 
@@ -5058,8 +5021,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
        RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
-       RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+       RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5084,12 +5046,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, EarlySize);
 
+       rtl_disable_clock_request(pdev);
+
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-       RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
-       RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+       RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+       RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5146,10 +5109,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
        rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
 
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-       RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
+       RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
        RTL_W8(MaxTxPacketSize, EarlySize);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
 
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5365,9 +5326,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
-       RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
 
        rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
 }
@@ -5393,9 +5351,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
 
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
-       RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
 
        rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
 
@@ -5417,10 +5372,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
 
-       RTL_W32(MISC,
-               (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+       RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
 }
index ecfb436..d457fa2 100644 (file)
@@ -1250,12 +1250,11 @@ static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
 
        BUG_ON(!pd->tx_ring);
 
-       pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
-               TX_RING_SIZE), GFP_KERNEL);
-       if (!pd->tx_buffers) {
-               smsc_warn(IFUP, "Failed to allocated tx_buffers");
+       pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
+                                      sizeof(struct smsc9420_ring_info),
+                                      GFP_KERNEL);
+       if (!pd->tx_buffers)
                return -ENOMEM;
-       }
 
        /* Initialize the TX Ring */
        for (i = 0; i < TX_RING_SIZE; i++) {
index fd4d659..39c6c55 100644 (file)
@@ -69,7 +69,7 @@
 
 #undef STMMAC_XMIT_DEBUG
 /*#define STMMAC_XMIT_DEBUG*/
-#ifdef STMMAC_TX_DEBUG
+#ifdef STMMAC_XMIT_DEBUG
 #define TX_DBG(fmt, args...)  printk(fmt, ## args)
 #else
 #define TX_DBG(fmt, args...)  do { } while (0)
@@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str)
                } else if (!strncmp(opt, "pause:", 6)) {
                        if (kstrtoint(opt + 6, 0, &pause))
                                goto err;
-               } else if (!strncmp(opt, "eee_timer:", 6)) {
+               } else if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                }
index 0376a5e..0b9829f 100644 (file)
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)
                goto bus_register_fail;
        }
 
-       priv->mii = new_bus;
-
        found = 0;
        for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                struct phy_device *phydev = new_bus->phy_map[addr];
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)
                }
        }
 
-       if (!found)
+       if (!found) {
                pr_warning("%s: No PHY found\n", ndev->name);
+               mdiobus_unregister(new_bus);
+               mdiobus_free(new_bus);
+               return -ENODEV;
+       }
+
+       priv->mii = new_bus;
 
        return 0;
 
index 9e63bff..7e93df6 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
+#include <linux/if_vlan.h>
 
 #include <linux/platform_data/cpsw.h>
 
@@ -118,6 +119,13 @@ do {                                                               \
 #define TX_PRIORITY_MAPPING    0x33221100
 #define CPDMA_TX_PRIORITY_MAP  0x76543210
 
+#define CPSW_VLAN_AWARE                BIT(1)
+#define CPSW_ALE_VLAN_AWARE    1
+
+#define CPSW_FIFO_NORMAL_MODE          (0 << 15)
+#define CPSW_FIFO_DUAL_MAC_MODE                (1 << 15)
+#define CPSW_FIFO_RATE_LIMIT_MODE      (2 << 15)
+
 #define cpsw_enable_irq(priv)  \
        do {                    \
                u32 i;          \
@@ -250,7 +258,7 @@ struct cpsw_ss_regs {
 struct cpsw_host_regs {
        u32     max_blks;
        u32     blk_cnt;
-       u32     flow_thresh;
+       u32     tx_in_ctl;
        u32     port_vlan;
        u32     tx_pri_map;
        u32     cpdma_tx_pri_map;
@@ -277,6 +285,9 @@ struct cpsw_slave {
        u32                             mac_control;
        struct cpsw_slave_data          *data;
        struct phy_device               *phy;
+       struct net_device               *ndev;
+       u32                             port_vlan;
+       u32                             open_stat;
 };
 
 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -315,17 +326,65 @@ struct cpsw_priv {
        /* snapshot of IRQ numbers */
        u32 irqs_table[4];
        u32 num_irqs;
-       struct cpts cpts;
+       struct cpts *cpts;
+       u32 emac_port;
 };
 
 #define napi_to_priv(napi)     container_of(napi, struct cpsw_priv, napi)
-#define for_each_slave(priv, func, arg...)                     \
-       do {                                                    \
-               int idx;                                        \
-               for (idx = 0; idx < (priv)->data.slaves; idx++) \
-                       (func)((priv)->slaves + idx, ##arg);    \
+#define for_each_slave(priv, func, arg...)                             \
+       do {                                                            \
+               int idx;                                                \
+               if (priv->data.dual_emac)                               \
+                       (func)((priv)->slaves + priv->emac_port, ##arg);\
+               else                                                    \
+                       for (idx = 0; idx < (priv)->data.slaves; idx++) \
+                               (func)((priv)->slaves + idx, ##arg);    \
+       } while (0)
+#define cpsw_get_slave_ndev(priv, __slave_no__)                                \
+       (priv->slaves[__slave_no__].ndev)
+#define cpsw_get_slave_priv(priv, __slave_no__)                                \
+       ((priv->slaves[__slave_no__].ndev) ?                            \
+               netdev_priv(priv->slaves[__slave_no__].ndev) : NULL)    \
+
+#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb)                \
+       do {                                                            \
+               if (!priv->data.dual_emac)                              \
+                       break;                                          \
+               if (CPDMA_RX_SOURCE_PORT(status) == 1) {                \
+                       ndev = cpsw_get_slave_ndev(priv, 0);            \
+                       priv = netdev_priv(ndev);                       \
+                       skb->dev = ndev;                                \
+               } else if (CPDMA_RX_SOURCE_PORT(status) == 2) {         \
+                       ndev = cpsw_get_slave_ndev(priv, 1);            \
+                       priv = netdev_priv(ndev);                       \
+                       skb->dev = ndev;                                \
+               }                                                       \
+       } while (0)
+#define cpsw_add_mcast(priv, addr)                                     \
+       do {                                                            \
+               if (priv->data.dual_emac) {                             \
+                       struct cpsw_slave *slave = priv->slaves +       \
+                                               priv->emac_port;        \
+                       int slave_port = cpsw_get_slave_port(priv,      \
+                                               slave->slave_num);      \
+                       cpsw_ale_add_mcast(priv->ale, addr,             \
+                               1 << slave_port | 1 << priv->host_port, \
+                               ALE_VLAN, slave->port_vlan, 0);         \
+               } else {                                                \
+                       cpsw_ale_add_mcast(priv->ale, addr,             \
+                               ALE_ALL_PORTS << priv->host_port,       \
+                               0, 0, 0);                               \
+               }                                                       \
        } while (0)
 
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+       if (priv->host_port == 0)
+               return slave_num + 1;
+       else
+               return slave_num;
+}
+
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
@@ -344,8 +403,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 
                /* program multicast address list into ALE register */
                netdev_for_each_mc_addr(ha, ndev) {
-                       cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
-                               ALE_ALL_PORTS << priv->host_port, 0, 0);
+                       cpsw_add_mcast(priv, (u8 *)ha->addr);
                }
        }
 }
@@ -379,7 +437,7 @@ void cpsw_tx_handler(void *token, int len, int status)
         */
        if (unlikely(netif_queue_stopped(ndev)))
                netif_start_queue(ndev);
-       cpts_tx_timestamp(&priv->cpts, skb);
+       cpts_tx_timestamp(priv->cpts, skb);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
@@ -392,6 +450,8 @@ void cpsw_rx_handler(void *token, int len, int status)
        struct cpsw_priv        *priv = netdev_priv(ndev);
        int                     ret = 0;
 
+       cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+
        /* free and bail if we are shutting down */
        if (unlikely(!netif_running(ndev)) ||
                        unlikely(!netif_carrier_ok(ndev))) {
@@ -400,7 +460,7 @@ void cpsw_rx_handler(void *token, int len, int status)
        }
        if (likely(status >= 0)) {
                skb_put(skb, len);
-               cpts_rx_timestamp(&priv->cpts, skb);
+               cpts_rx_timestamp(priv->cpts, skb);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
                priv->stats.rx_bytes += len;
@@ -420,7 +480,7 @@ void cpsw_rx_handler(void *token, int len, int status)
                        return;
 
                ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
-                                       skb_tailroom(skb), GFP_KERNEL);
+                                       skb_tailroom(skb), 0, GFP_KERNEL);
        }
        WARN_ON(ret < 0);
 }
@@ -433,37 +493,38 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
                cpsw_intr_disable(priv);
                cpsw_disable_irq(priv);
                napi_schedule(&priv->napi);
+       } else {
+               priv = cpsw_get_slave_priv(priv, 1);
+               if (likely(priv) && likely(netif_running(priv->ndev))) {
+                       cpsw_intr_disable(priv);
+                       cpsw_disable_irq(priv);
+                       napi_schedule(&priv->napi);
+               }
        }
        return IRQ_HANDLED;
 }
 
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
-{
-       if (priv->host_port == 0)
-               return slave_num + 1;
-       else
-               return slave_num;
-}
-
 static int cpsw_poll(struct napi_struct *napi, int budget)
 {
        struct cpsw_priv        *priv = napi_to_priv(napi);
        int                     num_tx, num_rx;
 
        num_tx = cpdma_chan_process(priv->txch, 128);
-       num_rx = cpdma_chan_process(priv->rxch, budget);
-
-       if (num_rx || num_tx)
-               cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-                        num_rx, num_tx);
+       if (num_tx)
+               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
+       num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
                napi_complete(napi);
                cpsw_intr_enable(priv);
-               cpdma_ctlr_eoi(priv->dma);
+               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
                cpsw_enable_irq(priv);
        }
 
+       if (num_rx || num_tx)
+               cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+                        num_rx, num_tx);
+
        return num_rx;
 }
 
@@ -562,6 +623,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
                                leader + strlen(name), val);
 }
 
+static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+{
+       u32 i;
+       u32 usage_count = 0;
+
+       if (!priv->data.dual_emac)
+               return 0;
+
+       for (i = 0; i < priv->data.slaves; i++)
+               if (priv->slaves[i].open_stat)
+                       usage_count++;
+
+       return usage_count;
+}
+
+static inline int cpsw_tx_packet_submit(struct net_device *ndev,
+                       struct cpsw_priv *priv, struct sk_buff *skb)
+{
+       if (!priv->data.dual_emac)
+               return cpdma_chan_submit(priv->txch, skb, skb->data,
+                                 skb->len, 0, GFP_KERNEL);
+
+       if (ndev == cpsw_get_slave_ndev(priv, 0))
+               return cpdma_chan_submit(priv->txch, skb, skb->data,
+                                 skb->len, 1, GFP_KERNEL);
+       else
+               return cpdma_chan_submit(priv->txch, skb, skb->data,
+                                 skb->len, 2, GFP_KERNEL);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+               struct cpsw_priv *priv, struct cpsw_slave *slave,
+               u32 slave_port)
+{
+       u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+
+       if (priv->version == CPSW_VERSION_1)
+               slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+       else
+               slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+       cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+                         port_mask, port_mask, 0);
+       cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+                          port_mask, ALE_VLAN, slave->port_vlan, 0);
+       cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+               priv->host_port, ALE_VLAN, slave->port_vlan);
+}
+
 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 {
        char name[32];
@@ -591,8 +700,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 
        slave_port = cpsw_get_slave_port(priv, slave->slave_num);
 
-       cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-                          1 << slave_port, 0, ALE_MCAST_FWD_2);
+       if (priv->data.dual_emac)
+               cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+       else
+               cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+                                  1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
        slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
@@ -607,14 +719,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
        }
 }
 
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+       const int vlan = priv->data.default_vlan;
+       const int port = priv->host_port;
+       u32 reg;
+       int i;
+
+       reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+              CPSW2_PORT_VLAN;
+
+       writel(vlan, &priv->host_port_regs->port_vlan);
+
+       for (i = 0; i < 2; i++)
+               slave_write(priv->slaves + i, vlan, reg);
+
+       cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
+                         ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
+                         (ALE_PORT_1 | ALE_PORT_2) << port);
+}
+
 static void cpsw_init_host_port(struct cpsw_priv *priv)
 {
+       u32 control_reg;
+       u32 fifo_mode;
+
        /* soft reset the controller and initialize ale */
        soft_reset("cpsw", &priv->regs->soft_reset);
        cpsw_ale_start(priv->ale);
 
        /* switch to vlan unaware mode */
-       cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+       cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+                            CPSW_ALE_VLAN_AWARE);
+       control_reg = readl(&priv->regs->control);
+       control_reg |= CPSW_VLAN_AWARE;
+       writel(control_reg, &priv->regs->control);
+       fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+                    CPSW_FIFO_NORMAL_MODE;
+       writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
 
        /* setup host port priority mapping */
        __raw_writel(CPDMA_TX_PRIORITY_MAP,
@@ -624,9 +766,12 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
        cpsw_ale_control_set(priv->ale, priv->host_port,
                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 
-       cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
-       cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-                          1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+       if (!priv->data.dual_emac) {
+               cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+                                  0, 0);
+               cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+                                  1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+       }
 }
 
 static int cpsw_ndo_open(struct net_device *ndev)
@@ -635,7 +780,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
        int i, ret;
        u32 reg;
 
-       cpsw_intr_disable(priv);
+       if (!cpsw_common_res_usage_state(priv))
+               cpsw_intr_disable(priv);
        netif_carrier_off(ndev);
 
        pm_runtime_get_sync(&priv->pdev->dev);
@@ -647,43 +793,55 @@ static int cpsw_ndo_open(struct net_device *ndev)
                 CPSW_RTL_VERSION(reg));
 
        /* initialize host and slave ports */
-       cpsw_init_host_port(priv);
+       if (!cpsw_common_res_usage_state(priv))
+               cpsw_init_host_port(priv);
        for_each_slave(priv, cpsw_slave_open, priv);
 
-       /* setup tx dma to fixed prio and zero offset */
-       cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
-       cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+       /* Add default VLAN */
+       if (!priv->data.dual_emac)
+               cpsw_add_default_vlan(priv);
 
-       /* disable priority elevation and enable statistics on all ports */
-       __raw_writel(0, &priv->regs->ptype);
+       if (!cpsw_common_res_usage_state(priv)) {
+               /* setup tx dma to fixed prio and zero offset */
+               cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+               cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
 
-       /* enable statistics collection only on the host port */
-       __raw_writel(0x7, &priv->regs->stat_port_en);
+               /* disable priority elevation */
+               __raw_writel(0, &priv->regs->ptype);
 
-       if (WARN_ON(!priv->data.rx_descs))
-               priv->data.rx_descs = 128;
+               /* enable statistics collection on all ports */
+               __raw_writel(0x7, &priv->regs->stat_port_en);
 
-       for (i = 0; i < priv->data.rx_descs; i++) {
-               struct sk_buff *skb;
+               if (WARN_ON(!priv->data.rx_descs))
+                       priv->data.rx_descs = 128;
 
-               ret = -ENOMEM;
-               skb = netdev_alloc_skb_ip_align(priv->ndev,
-                                               priv->rx_packet_max);
-               if (!skb)
-                       break;
-               ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
-                                       skb_tailroom(skb), GFP_KERNEL);
-               if (WARN_ON(ret < 0))
-                       break;
+               for (i = 0; i < priv->data.rx_descs; i++) {
+                       struct sk_buff *skb;
+
+                       ret = -ENOMEM;
+                       skb = netdev_alloc_skb_ip_align(priv->ndev,
+                                                       priv->rx_packet_max);
+                       if (!skb)
+                               break;
+                       ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+                                       skb_tailroom(skb), 0, GFP_KERNEL);
+                       if (WARN_ON(ret < 0))
+                               break;
+               }
+               /* continue even if we didn't manage to submit all
+                * receive descs
+                */
+               cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
        }
-       /* continue even if we didn't manage to submit all receive descs */
-       cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
 
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
        napi_enable(&priv->napi);
-       cpdma_ctlr_eoi(priv->dma);
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
+       if (priv->data.dual_emac)
+               priv->slaves[priv->emac_port].open_stat = true;
        return 0;
 }
 
@@ -704,12 +862,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
        netif_stop_queue(priv->ndev);
        napi_disable(&priv->napi);
        netif_carrier_off(priv->ndev);
-       cpsw_intr_disable(priv);
-       cpdma_ctlr_int_ctrl(priv->dma, false);
-       cpdma_ctlr_stop(priv->dma);
-       cpsw_ale_stop(priv->ale);
+
+       if (cpsw_common_res_usage_state(priv) <= 1) {
+               cpsw_intr_disable(priv);
+               cpdma_ctlr_int_ctrl(priv->dma, false);
+               cpdma_ctlr_stop(priv->dma);
+               cpsw_ale_stop(priv->ale);
+       }
        for_each_slave(priv, cpsw_slave_stop, priv);
        pm_runtime_put_sync(&priv->pdev->dev);
+       if (priv->data.dual_emac)
+               priv->slaves[priv->emac_port].open_stat = false;
        return 0;
 }
 
@@ -727,13 +890,13 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+       if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+                               priv->cpts->tx_enable)
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
        skb_tx_timestamp(skb);
 
-       ret = cpdma_chan_submit(priv->txch, skb, skb->data,
-                               skb->len, GFP_KERNEL);
+       ret = cpsw_tx_packet_submit(ndev, priv, skb);
        if (unlikely(ret != 0)) {
                cpsw_err(priv, tx_err, "desc submit failed\n");
                goto fail;
@@ -782,7 +945,7 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
        struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
        u32 ts_en, seq_id;
 
-       if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+       if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
                slave_write(slave, 0, CPSW1_TS_CTL);
                return;
        }
@@ -790,10 +953,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
        seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
        ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
 
-       if (priv->cpts.tx_enable)
+       if (priv->cpts->tx_enable)
                ts_en |= CPSW_V1_TS_TX_EN;
 
-       if (priv->cpts.rx_enable)
+       if (priv->cpts->rx_enable)
                ts_en |= CPSW_V1_TS_RX_EN;
 
        slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -802,16 +965,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 
 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 {
-       struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+       struct cpsw_slave *slave;
        u32 ctrl, mtype;
 
+       if (priv->data.dual_emac)
+               slave = &priv->slaves[priv->emac_port];
+       else
+               slave = &priv->slaves[priv->data.cpts_active_slave];
+
        ctrl = slave_read(slave, CPSW2_CONTROL);
        ctrl &= ~CTRL_ALL_TS_MASK;
 
-       if (priv->cpts.tx_enable)
+       if (priv->cpts->tx_enable)
                ctrl |= CTRL_TX_TS_BITS;
 
-       if (priv->cpts.rx_enable)
+       if (priv->cpts->rx_enable)
                ctrl |= CTRL_RX_TS_BITS;
 
        mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -824,7 +992,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 {
        struct cpsw_priv *priv = netdev_priv(dev);
-       struct cpts *cpts = &priv->cpts;
+       struct cpts *cpts = priv->cpts;
        struct hwtstamp_config cfg;
 
        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -910,7 +1078,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        cpdma_chan_start(priv->txch);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma);
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
 }
 
 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
@@ -929,10 +1099,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
        cpsw_interrupt(ndev->irq, priv);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma);
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
 }
 #endif
 
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+                               unsigned short vid)
+{
+       int ret;
+
+       ret = cpsw_ale_add_vlan(priv->ale, vid,
+                               ALE_ALL_PORTS << priv->host_port,
+                               0, ALE_ALL_PORTS << priv->host_port,
+                               (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+       if (ret != 0)
+               return ret;
+
+       ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+                                priv->host_port, ALE_VLAN, vid);
+       if (ret != 0)
+               goto clean_vid;
+
+       ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+                                ALE_ALL_PORTS << priv->host_port,
+                                ALE_VLAN, vid, 0);
+       if (ret != 0)
+               goto clean_vlan_ucast;
+       return 0;
+
+clean_vlan_ucast:
+       cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+                           priv->host_port, ALE_VLAN, vid);
+clean_vid:
+       cpsw_ale_del_vlan(priv->ale, vid, 0);
+       return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+               unsigned short vid)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+
+       if (vid == priv->data.default_vlan)
+               return 0;
+
+       dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+       return cpsw_add_vlan_ale_entry(priv, vid);
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+               unsigned short vid)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (vid == priv->data.default_vlan)
+               return 0;
+
+       dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+       ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+       if (ret != 0)
+               return ret;
+
+       ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+                                priv->host_port, ALE_VLAN, vid);
+       if (ret != 0)
+               return ret;
+
+       return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+                                 0, ALE_VLAN, vid);
+}
+
 static const struct net_device_ops cpsw_netdev_ops = {
        .ndo_open               = cpsw_ndo_open,
        .ndo_stop               = cpsw_ndo_stop,
@@ -947,6 +1186,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cpsw_ndo_poll_controller,
 #endif
+       .ndo_vlan_rx_add_vid    = cpsw_ndo_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
 };
 
 static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -984,7 +1225,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
                SOF_TIMESTAMPING_RX_SOFTWARE |
                SOF_TIMESTAMPING_SOFTWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
-       info->phc_index = priv->cpts.phc_index;
+       info->phc_index = priv->cpts->phc_index;
        info->tx_types =
                (1 << HWTSTAMP_TX_OFF) |
                (1 << HWTSTAMP_TX_ON);
@@ -1021,6 +1262,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->data     = data;
        slave->regs     = regs + slave_reg_ofs;
        slave->sliver   = regs + sliver_reg_ofs;
+       slave->port_vlan = data->dual_emac_res_vlan;
 }
 
 static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -1101,6 +1343,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        }
        data->mac_control = prop;
 
+       if (!of_property_read_u32(node, "dual_emac", &prop))
+               data->dual_emac = prop;
+
        /*
         * Populate all the child nodes here...
         */
@@ -1134,6 +1379,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                if (mac_addr)
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
+               if (data->dual_emac) {
+                       if (of_property_read_u32(node, "dual_emac_res_vlan",
+                                                &prop)) {
+                               pr_err("Missing dual_emac_res_vlan in DT.\n");
+                               slave_data->dual_emac_res_vlan = i+1;
+                               pr_err("Using %d as Reserved VLAN for %d slave\n",
+                                      slave_data->dual_emac_res_vlan, i);
+                       } else {
+                               slave_data->dual_emac_res_vlan = prop;
+                       }
+               }
+
                i++;
        }
 
@@ -1144,6 +1401,79 @@ error_ret:
        return ret;
 }
 
+static int cpsw_probe_dual_emac(struct platform_device *pdev,
+                               struct cpsw_priv *priv)
+{
+       struct cpsw_platform_data       *data = &priv->data;
+       struct net_device               *ndev;
+       struct cpsw_priv                *priv_sl2;
+       int ret = 0, i;
+
+       ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+       if (!ndev) {
+               pr_err("cpsw: error allocating net_device\n");
+               return -ENOMEM;
+       }
+
+       priv_sl2 = netdev_priv(ndev);
+       spin_lock_init(&priv_sl2->lock);
+       priv_sl2->data = *data;
+       priv_sl2->pdev = pdev;
+       priv_sl2->ndev = ndev;
+       priv_sl2->dev  = &ndev->dev;
+       priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+       priv_sl2->rx_packet_max = max(rx_packet_max, 128);
+
+       if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+               memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+                       ETH_ALEN);
+               pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+       } else {
+               random_ether_addr(priv_sl2->mac_addr);
+               pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+       }
+       memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+
+       priv_sl2->slaves = priv->slaves;
+       priv_sl2->clk = priv->clk;
+
+       priv_sl2->cpsw_res = priv->cpsw_res;
+       priv_sl2->regs = priv->regs;
+       priv_sl2->host_port = priv->host_port;
+       priv_sl2->host_port_regs = priv->host_port_regs;
+       priv_sl2->wr_regs = priv->wr_regs;
+       priv_sl2->dma = priv->dma;
+       priv_sl2->txch = priv->txch;
+       priv_sl2->rxch = priv->rxch;
+       priv_sl2->ale = priv->ale;
+       priv_sl2->emac_port = 1;
+       priv->slaves[1].ndev = ndev;
+       priv_sl2->cpts = priv->cpts;
+       priv_sl2->version = priv->version;
+
+       for (i = 0; i < priv->num_irqs; i++) {
+               priv_sl2->irqs_table[i] = priv->irqs_table[i];
+               priv_sl2->num_irqs = priv->num_irqs;
+       }
+
+       ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+       ndev->netdev_ops = &cpsw_netdev_ops;
+       SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+       netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+
+       /* register the network device */
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       ret = register_netdev(ndev);
+       if (ret) {
+               pr_err("cpsw: error registering net device\n");
+               free_netdev(ndev);
+               ret = -ENODEV;
+       }
+
+       return ret;
+}
+
 static int cpsw_probe(struct platform_device *pdev)
 {
        struct cpsw_platform_data       *data = pdev->dev.platform_data;
@@ -1170,6 +1500,11 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->dev  = &ndev->dev;
        priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
        priv->rx_packet_max = max(rx_packet_max, 128);
+       priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+       if (!ndev) {
+               pr_err("error allocating cpts\n");
+               goto clean_ndev_ret;
+       }
 
        /*
         * This may be required here for child devices.
@@ -1202,6 +1537,9 @@ static int cpsw_probe(struct platform_device *pdev)
        for (i = 0; i < data->slaves; i++)
                priv->slaves[i].slave_num = i;
 
+       priv->slaves[0].ndev = ndev;
+       priv->emac_port = 0;
+
        priv->clk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(priv->clk)) {
                dev_err(&pdev->dev, "fck is not found\n");
@@ -1256,7 +1594,7 @@ static int cpsw_probe(struct platform_device *pdev)
        switch (priv->version) {
        case CPSW_VERSION_1:
                priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
-               priv->cpts.reg       = ss_regs + CPSW1_CPTS_OFFSET;
+               priv->cpts->reg       = ss_regs + CPSW1_CPTS_OFFSET;
                dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
                dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
                ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
@@ -1267,7 +1605,7 @@ static int cpsw_probe(struct platform_device *pdev)
                break;
        case CPSW_VERSION_2:
                priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
-               priv->cpts.reg       = ss_regs + CPSW2_CPTS_OFFSET;
+               priv->cpts->reg       = ss_regs + CPSW2_CPTS_OFFSET;
                dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
                dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
                ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
@@ -1354,7 +1692,7 @@ static int cpsw_probe(struct platform_device *pdev)
                k++;
        }
 
-       ndev->flags |= IFF_ALLMULTI;    /* see cpsw_ndo_change_rx_flags() */
+       ndev->features |= NETIF_F_HW_VLAN_FILTER;
 
        ndev->netdev_ops = &cpsw_netdev_ops;
        SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1369,13 +1707,21 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_irq_ret;
        }
 
-       if (cpts_register(&pdev->dev, &priv->cpts,
+       if (cpts_register(&pdev->dev, priv->cpts,
                          data->cpts_clock_mult, data->cpts_clock_shift))
                dev_err(priv->dev, "error registering cpts device\n");
 
        cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
                  priv->cpsw_res->start, ndev->irq);
 
+       if (priv->data.dual_emac) {
+               ret = cpsw_probe_dual_emac(pdev, priv);
+               if (ret) {
+                       cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+                       goto clean_irq_ret;
+               }
+       }
+
        return 0;
 
 clean_irq_ret:
@@ -1414,7 +1760,7 @@ static int cpsw_remove(struct platform_device *pdev)
        pr_info("removing device");
        platform_set_drvdata(pdev, NULL);
 
-       cpts_unregister(&priv->cpts);
+       cpts_unregister(priv->cpts);
        free_irq(ndev->irq, priv);
        cpsw_ale_destroy(priv->ale);
        cpdma_chan_destroy(priv->txch);
index 0e9ccc2..7fa60d6 100644 (file)
@@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
        return idx;
 }
 
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int type, idx;
@@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
                type = cpsw_ale_get_entry_type(ale_entry);
                if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
                        continue;
+               if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+                       continue;
                cpsw_ale_get_addr(ale_entry, entry_addr);
                if (memcmp(entry_addr, addr, 6) == 0)
                        return idx;
@@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
        return -ENOENT;
 }
 
+int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+       u32 ale_entry[ALE_ENTRY_WORDS];
+       int type, idx;
+
+       for (idx = 0; idx < ale->params.ale_entries; idx++) {
+               cpsw_ale_read(ale, idx, ale_entry);
+               type = cpsw_ale_get_entry_type(ale_entry);
+               if (type != ALE_TYPE_VLAN)
+                       continue;
+               if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+                       return idx;
+       }
+       return -ENOENT;
+}
+
 static int cpsw_ale_match_free(struct cpsw_ale *ale)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
@@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
        return 0;
 }
 
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+                                               int flags, u16 vid)
+{
+       if (flags & ALE_VLAN) {
+               cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+               cpsw_ale_set_vlan_id(ale_entry, vid);
+       } else {
+               cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+       }
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+                      int flags, u16 vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
        int idx;
 
-       cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+       cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
        cpsw_ale_set_addr(ale_entry, addr);
        cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
        cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
        cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
        cpsw_ale_set_port_num(ale_entry, port);
 
-       idx = cpsw_ale_match_addr(ale, addr);
+       idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
                idx = cpsw_ale_match_free(ale);
        if (idx < 0)
@@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
        return 0;
 }
 
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+                      int flags, u16 vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
        int idx;
 
-       idx = cpsw_ale_match_addr(ale, addr);
+       idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
                return -ENOENT;
 
@@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
 }
 
 int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
-                       int super, int mcast_state)
+                      int flags, u16 vid, int mcast_state)
 {
        u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
        int idx, mask;
 
-       idx = cpsw_ale_match_addr(ale, addr);
+       idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx >= 0)
                cpsw_ale_read(ale, idx, ale_entry);
 
-       cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+       cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
        cpsw_ale_set_addr(ale_entry, addr);
-       cpsw_ale_set_super(ale_entry, super);
+       cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
        cpsw_ale_set_mcast_state(ale_entry, mcast_state);
 
        mask = cpsw_ale_get_port_mask(ale_entry);
@@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
        return 0;
 }
 
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+                      int flags, u16 vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
        int idx;
 
-       idx = cpsw_ale_match_addr(ale, addr);
+       idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
                return -EINVAL;
 
@@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
        return 0;
 }
 
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+                     int reg_mcast, int unreg_mcast)
+{
+       u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+       int idx;
+
+       idx = cpsw_ale_match_vlan(ale, vid);
+       if (idx >= 0)
+               cpsw_ale_read(ale, idx, ale_entry);
+
+       cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+       cpsw_ale_set_vlan_id(ale_entry, vid);
+
+       cpsw_ale_set_vlan_untag_force(ale_entry, untag);
+       cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
+       cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+       cpsw_ale_set_vlan_member_list(ale_entry, port);
+
+       if (idx < 0)
+               idx = cpsw_ale_match_free(ale);
+       if (idx < 0)
+               idx = cpsw_ale_find_ageable(ale);
+       if (idx < 0)
+               return -ENOMEM;
+
+       cpsw_ale_write(ale, idx, ale_entry);
+       return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+       u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+       int idx;
+
+       idx = cpsw_ale_match_vlan(ale, vid);
+       if (idx < 0)
+               return -ENOENT;
+
+       cpsw_ale_read(ale, idx, ale_entry);
+
+       if (port_mask)
+               cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+       else
+               cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+       cpsw_ale_write(ale, idx, ale_entry);
+       return 0;
+}
+
 struct ale_control_info {
        const char      *name;
        int             offset, port_offset;
index 2bd09cb..30daa12 100644 (file)
@@ -64,8 +64,14 @@ enum cpsw_ale_port_state {
 };
 
 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
-#define ALE_SECURE                     1
-#define ALE_BLOCKED                    2
+#define ALE_SECURE                     BIT(0)
+#define ALE_BLOCKED                    BIT(1)
+#define ALE_SUPER                      BIT(2)
+#define ALE_VLAN                       BIT(3)
+
+#define ALE_PORT_HOST                  BIT(0)
+#define ALE_PORT_1                     BIT(1)
+#define ALE_PORT_2                     BIT(2)
 
 #define ALE_MCAST_FWD                  0
 #define ALE_MCAST_BLOCK_LEARN_FWD      1
@@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
 int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+                      int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+                      int flags, u16 vid);
 int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
-                       int super, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
+                      int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+                      int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+                       int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
 
 int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
 int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
index f862918..68c3418 100644 (file)
@@ -60,6 +60,9 @@
 #define CPDMA_DESC_EOQ         BIT(28)
 #define CPDMA_DESC_TD_COMPLETE BIT(27)
 #define CPDMA_DESC_PASS_CRC    BIT(26)
+#define CPDMA_DESC_TO_PORT_EN  BIT(20)
+#define CPDMA_TO_PORT_SHIFT    16
+#define CPDMA_DESC_PORT_MASK   (BIT(18) | BIT(17) | BIT(16))
 
 #define CPDMA_TEARDOWN_VALUE   0xfffffffc
 
@@ -132,6 +135,14 @@ struct cpdma_chan {
 #define chan_write(chan, fld, v)       __raw_writel(v, (chan)->fld)
 #define desc_write(desc, fld, v)       __raw_writel((u32)(v), &(desc)->fld)
 
+#define cpdma_desc_to_port(chan, mode, directed)                       \
+       do {                                                            \
+               if (!is_rx_chan(chan) && ((directed == 1) ||            \
+                                         (directed == 2)))             \
+                       mode |= (CPDMA_DESC_TO_PORT_EN |                \
+                                (directed << CPDMA_TO_PORT_SHIFT));    \
+       } while (0)
+
 /*
  * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
  * emac) have dedicated on-chip memory for these descriptors.  Some other
@@ -449,10 +460,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);
 
-       for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
-               if (ctlr->channels[i])
-                       cpdma_chan_destroy(ctlr->channels[i]);
-       }
+       for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+               cpdma_chan_destroy(ctlr->channels[i]);
 
        cpdma_desc_pool_destroy(ctlr->pool);
        spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -484,9 +493,9 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
        return 0;
 }
 
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
 {
-       dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+       dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
 }
 
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
@@ -662,7 +671,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
 }
 
 int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
-                     int len, gfp_t gfp_mask)
+                     int len, int directed, gfp_t gfp_mask)
 {
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
@@ -692,6 +701,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 
        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+       cpdma_desc_to_port(chan, mode, directed);
 
        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
@@ -782,7 +792,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
                status = -EBUSY;
                goto unlock_ret;
        }
-       status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+       status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+                           CPDMA_DESC_PORT_MASK);
 
        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
index 8d2aeb2..d9bcc60 100644 (file)
 #define __chan_linear(chan_num)        ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
 #define chan_linear(chan)      __chan_linear((chan)->chan_num)
 
+#define CPDMA_RX_SOURCE_PORT(__status__)       ((__status__ >> 16) & 0x7)
+
+#define CPDMA_EOI_RX_THRESH    0x0
+#define CPDMA_EOI_RX           0x1
+#define CPDMA_EOI_TX           0x2
+#define CPDMA_EOI_MISC         0x3
+
 struct cpdma_params {
        struct device           *dev;
        void __iomem            *dmaregs;
@@ -82,11 +89,11 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
 int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats);
 int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
-                     int len, gfp_t gfp_mask);
+                     int len, int directed, gfp_t gfp_mask);
 int cpdma_chan_process(struct cpdma_chan *chan, int quota);
 
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
 
index 242ec55..52c0536 100644 (file)
@@ -1037,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
 
 recycle:
        ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
-                       skb_tailroom(skb), GFP_KERNEL);
+                       skb_tailroom(skb), 0, GFP_KERNEL);
 
        WARN_ON(ret == -ENOMEM);
        if (unlikely(ret < 0))
@@ -1092,7 +1092,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
        skb_tx_timestamp(skb);
 
        ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
-                                    GFP_KERNEL);
+                                    0, GFP_KERNEL);
        if (unlikely(ret_code != 0)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
                        break;
 
                ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
-                                       skb_tailroom(skb), GFP_KERNEL);
+                                       skb_tailroom(skb), 0, GFP_KERNEL);
                if (WARN_ON(ret < 0))
                        break;
        }
index ec4a5e1..185c721 100644 (file)
@@ -1812,7 +1812,7 @@ static void rhine_tx(struct net_device *dev)
                                         rp->tx_skbuff[entry]->len,
                                         PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+               dev_kfree_skb(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
@@ -2024,11 +2024,7 @@ static void rhine_slow_event_task(struct work_struct *work)
        if (intr_status & IntrPCIErr)
                netif_warn(rp, hw, dev, "PCI error\n");
 
-       napi_disable(&rp->napi);
-       rhine_irq_disable(rp);
-       /* Slow and safe. Consider __napi_schedule as a replacement ? */
-       napi_enable(&rp->napi);
-       napi_schedule(&rp->napi);
+       iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
        mutex_unlock(&rp->task_lock);
index c2e5497..02de6c8 100644 (file)
@@ -586,7 +586,8 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
 static int __init bpq_init_driver(void)
 {
 #ifdef CONFIG_PROC_FS
-       if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) {
+       if (!proc_create("bpqether", S_IRUGO, init_net.proc_net,
+                        &bpq_info_fops)) {
                printk(KERN_ERR
                        "bpq: cannot create /proc/net/bpqether entry.\n");
                return -ENOENT;
@@ -610,7 +611,7 @@ static void __exit bpq_cleanup_driver(void)
 
        unregister_netdevice_notifier(&bpq_dev_notifier);
 
-       proc_net_remove(&init_net, "bpqether");
+       remove_proc_entry("bpqether", init_net.proc_net);
 
        rtnl_lock();
        while (!list_empty(&bpq_devices)) {
index 1b4a47b..bc1d521 100644 (file)
@@ -2118,7 +2118,7 @@ static int __init scc_init_driver (void)
        }
        rtnl_unlock();
 
-       proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops);
+       proc_create("z8530drv", 0, init_net.proc_net, &scc_net_seq_fops);
 
        return 0;
 }
@@ -2173,7 +2173,7 @@ static void __exit scc_cleanup_driver(void)
        if (Vector_Latch)
                release_region(Vector_Latch, 1);
 
-       proc_net_remove(&init_net, "z8530drv");
+       remove_proc_entry("z8530drv", init_net.proc_net);
 }
 
 MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
index c6645f1..4cf8f10 100644 (file)
@@ -1167,7 +1167,7 @@ static int __init yam_init_driver(void)
        yam_timer.expires = jiffies + HZ / 100;
        add_timer(&yam_timer);
 
-       proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops);
+       proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops);
        return 0;
  error:
        while (--i >= 0) {
@@ -1199,7 +1199,7 @@ static void __exit yam_cleanup_driver(void)
                kfree(p);
        }
 
-       proc_net_remove(&init_net, "yam");
+       remove_proc_entry("yam", init_net.proc_net);
 }
 
 /* --------------------------------------------------------------------- */
index a4a62e1..fc1687e 100644 (file)
@@ -751,16 +751,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        return 0;
 }
 
-static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
-{
-       return 0;
-}
-
-static int at86rf230_resume(struct spi_device *spi)
-{
-       return 0;
-}
-
 static int at86rf230_fill_data(struct spi_device *spi)
 {
        struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -948,8 +938,6 @@ static struct spi_driver at86rf230_driver = {
        },
        .probe      = at86rf230_probe,
        .remove     = at86rf230_remove,
-       .suspend    = at86rf230_suspend,
-       .resume     = at86rf230_resume,
 };
 
 module_spi_driver(at86rf230_driver);
index 7b44ebd..defcd8a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/if_vlan.h>
 #include <linux/if_link.h>
 #include <linux/if_macvlan.h>
+#include <linux/hash.h>
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
 
@@ -126,6 +127,21 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
        return vlan->receive(skb);
 }
 
+static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
+{
+       return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
+}
+
+
+static unsigned int mc_hash(const struct macvlan_dev *vlan,
+                           const unsigned char *addr)
+{
+       u32 val = __get_unaligned_cpu32(addr + 2);
+
+       val ^= macvlan_hash_mix(vlan);
+       return hash_32(val, MACVLAN_MC_FILTER_BITS);
+}
+
 static void macvlan_broadcast(struct sk_buff *skb,
                              const struct macvlan_port *port,
                              struct net_device *src,
@@ -137,6 +153,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
        struct sk_buff *nskb;
        unsigned int i;
        int err;
+       unsigned int hash;
 
        if (skb->protocol == htons(ETH_P_PAUSE))
                return;
@@ -146,6 +163,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
                        if (vlan->dev == src || !(vlan->mode & mode))
                                continue;
 
+                       hash = mc_hash(vlan, eth->h_dest);
+                       if (!test_bit(hash, vlan->mc_filter))
+                               continue;
                        nskb = skb_clone(skb, GFP_ATOMIC);
                        err = macvlan_broadcast_one(nskb, vlan, eth,
                                         mode == MACVLAN_MODE_BRIDGE);
@@ -405,6 +425,21 @@ static void macvlan_set_mac_lists(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
+       if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+               bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
+       } else {
+               struct netdev_hw_addr *ha;
+               DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
+
+               bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
+               netdev_for_each_mc_addr(ha, dev) {
+                       __set_bit(mc_hash(vlan, ha->addr), filter);
+               }
+
+               __set_bit(mc_hash(vlan, dev->broadcast), filter);
+
+               bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
+       }
        dev_uc_sync(vlan->lowerdev, dev);
        dev_mc_sync(vlan->lowerdev, dev);
 }
@@ -564,7 +599,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return err;
 }
 
-static int macvlan_fdb_del(struct ndmsg *ndm,
+static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
                           const unsigned char *addr)
 {
index b181dfb..9724301 100644 (file)
@@ -543,7 +543,6 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                skb->data_len += len;
                skb->len += len;
                skb->truesize += truesize;
-               skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
                atomic_add(truesize, &skb->sk->sk_wmem_alloc);
                while (len) {
                        int off = base & ~PAGE_MASK;
@@ -599,7 +598,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
 
        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
-               skb_shinfo(skb)->gso_type |= gso_type;
+               skb_shinfo(skb)->gso_type = gso_type;
 
                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -743,6 +742,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+               skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);
index 0b2706a..4fd754e 100644 (file)
@@ -1805,8 +1805,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                /* the filter instructions are constructed assuming
                   a four-byte PPP header on each packet */
                if (ppp->pass_filter || ppp->active_filter) {
-                       if (skb_cloned(skb) &&
-                           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+                       if (skb_unclone(skb, GFP_ATOMIC))
                                goto err;
 
                        *skb_push(skb, 2) = 0;
index 20f31d0..bb07ba9 100644 (file)
@@ -1134,7 +1134,7 @@ static __net_init int pppoe_init_net(struct net *net)
 
        rwlock_init(&pn->hash_lock);
 
-       pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
+       pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops);
 #ifdef CONFIG_PROC_FS
        if (!pde)
                return -ENOMEM;
@@ -1145,7 +1145,7 @@ static __net_init int pppoe_init_net(struct net *net)
 
 static __net_exit void pppoe_exit_net(struct net *net)
 {
-       proc_net_remove(net, "pppoe");
+       remove_proc_entry("pppoe", net->proc_net);
 }
 
 static struct pernet_operations pppoe_net_ops = {
index 694ccf6..05c5efe 100644 (file)
@@ -508,6 +508,7 @@ static bool team_is_mode_set(struct team *team)
 
 static void team_set_no_mode(struct team *team)
 {
+       team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
 }
 
@@ -1710,6 +1711,10 @@ static netdev_features_t team_fix_features(struct net_device *dev,
 
 static int team_change_carrier(struct net_device *dev, bool new_carrier)
 {
+       struct team *team = netdev_priv(dev);
+
+       team->user_carrier_enabled = true;
+
        if (new_carrier)
                netif_carrier_on(dev);
        else
@@ -2573,6 +2578,9 @@ static void __team_carrier_check(struct team *team)
        struct team_port *port;
        bool team_linkup;
 
+       if (team->user_carrier_enabled)
+               return;
+
        team_linkup = false;
        list_for_each_entry(port, &team->port_list, list) {
                if (port->linkup) {
index 8d208dd..b6f45c5 100644 (file)
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-                           u16 queue_index)
+                           struct tun_file *tfile)
 {
        struct hlist_head *head;
        struct tun_flow_entry *e;
        unsigned long delay = tun->ageing_time;
+       u16 queue_index = tfile->queue_index;
 
        if (!rxhash)
                return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
        rcu_read_lock();
 
-       if (tun->numqueues == 1)
+       /* We may get a very small possibility of OOO during switching, not
+        * worth to optimize.*/
+       if (tun->numqueues == 1 || tfile->detached)
                goto unlock;
 
        e = tun_flow_find(head, rxhash);
@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
        tun = rtnl_dereference(tfile->tun);
 
-       if (tun) {
+       if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
                dev = tun->dev;
 
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
-               rcu_assign_pointer(tfile->tun, NULL);
                ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;
 
                --tun->numqueues;
-               if (clean)
+               if (clean) {
+                       rcu_assign_pointer(tfile->tun, NULL);
                        sock_put(&tfile->sk);
-               else
+               } else
                        tun_disable_queue(tun, tfile);
 
                synchronize_net();
@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        }
 
        if (clean) {
-               if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-                   !(tun->flags & TUN_PERSIST))
-                       if (tun->dev->reg_state == NETREG_REGISTERED)
+               if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+                       netif_carrier_off(tun->dev);
+
+                       if (!(tun->flags & TUN_PERSIST) &&
+                           tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
+               }
 
                BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
                                 &tfile->socket.flags));
@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
                rcu_assign_pointer(tfile->tun, NULL);
                --tun->numqueues;
        }
+       list_for_each_entry(tfile, &tun->disabled, next) {
+               wake_up_all(&tfile->wq.wait);
+               rcu_assign_pointer(tfile->tun, NULL);
+       }
        BUG_ON(tun->numqueues != 0);
 
        synchronize_net();
@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
                goto out;
 
        err = -EINVAL;
-       if (rtnl_dereference(tfile->tun))
+       if (rtnl_dereference(tfile->tun) && !tfile->detached)
                goto out;
 
        err = -EBUSY;
@@ -1009,7 +1019,6 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                skb->data_len += len;
                skb->len += len;
                skb->truesize += truesize;
-               skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
                atomic_add(truesize, &skb->sk->sk_wmem_alloc);
                while (len) {
                        int off = base & ~PAGE_MASK;
@@ -1155,18 +1164,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-               unsigned short gso_type = 0;
-
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
-                       gso_type = SKB_GSO_TCPV4;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
-                       gso_type = SKB_GSO_TCPV6;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-                       gso_type = SKB_GSO_UDP;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                default:
                        tun->dev->stats.rx_frame_errors++;
@@ -1175,10 +1182,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                }
 
                if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
-                       gso_type |= SKB_GSO_TCP_ECN;
+                       skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
                skb_shinfo(skb)->gso_size = gso.gso_size;
-               skb_shinfo(skb)->gso_type |= gso_type;
                if (skb_shinfo(skb)->gso_size == 0) {
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
@@ -1194,6 +1200,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+               skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
 
        skb_reset_network_header(skb);
@@ -1203,7 +1210,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        tun->dev->stats.rx_packets++;
        tun->dev->stats.rx_bytes += len;
 
-       tun_flow_update(tun, rxhash, tfile->queue_index);
+       tun_flow_update(tun, rxhash, tfile);
        return total_len;
 }
 
@@ -1662,10 +1669,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
                        pr_err("Failed to create tun sysfs files\n");
-
-               netif_carrier_on(tun->dev);
        }
 
+       netif_carrier_on(tun->dev);
+
        tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
        if (ifr->ifr_flags & IFF_NO_PI)
@@ -1817,7 +1824,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = tun_attach(tun, file);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
-               if (!tun || !(tun->flags & TUN_TAP_MQ))
+               if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
                        ret = -EINVAL;
                else
                        __tun_detach(tfile, false);
index b5ad7ea..4a8c25a 100644 (file)
@@ -576,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
        if ((intf->num_altsetting == 2) &&
            !usb_set_interface(dev->udev,
                               intf->cur_altsetting->desc.bInterfaceNumber,
-                              CDC_NCM_COMM_ALTSETTING_MBIM) &&
-           cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
-               return -ENODEV;
+                              CDC_NCM_COMM_ALTSETTING_MBIM)) {
+               if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+                       return -ENODEV;
+               else
+                       usb_set_interface(dev->udev,
+                                         intf->cur_altsetting->desc.bInterfaceNumber,
+                                         CDC_NCM_COMM_ALTSETTING_NCM);
+       }
 #endif
 
        /* NCM data altsetting is always 1 */
@@ -1215,6 +1220,9 @@ static const struct usb_device_id cdc_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
          .driver_info = (unsigned long)&wwan_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+         .driver_info = (unsigned long)&wwan_info,
+       },
 
        /* Infineon(now Intel) HSPA Modem platform */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
index 8ee5ab0..73051d1 100644 (file)
@@ -149,11 +149,9 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
        DECLARE_WAITQUEUE(wait, current);
 
        buffer = kmalloc(size, GFP_KERNEL);
-       if (!buffer) {
-               netif_warn(pegasus, drv, pegasus->net,
-                          "out of memory in %s\n", __func__);
+       if (!buffer)
                return -ENOMEM;
-       }
+
        add_wait_queue(&pegasus->ctrl_wait, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (pegasus->flags & ETH_REGS_CHANGED)
index 575a583..19d9035 100644 (file)
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
 
        /* 2. Combined interface devices matching on class+protocol */
        {       /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
+       {       /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
        {       /* Pantech UML290, P4200 and more */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
                .driver_info        = (unsigned long)&qmi_wwan_info,
@@ -399,6 +411,7 @@ static const struct usb_device_id products[] = {
        },
 
        /* 3. Combined interface devices matching on interface number */
+       {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
@@ -461,6 +474,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index 30c1b33..51f3192 100644 (file)
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
        unsigned long           lockflags;
        size_t                  size = dev->rx_urb_size;
 
+       /* prevent rx skb allocation when error ratio is high */
+       if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+               usb_free_urb(urb);
+               return -ENOLINK;
+       }
+
        skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
                break;
        }
 
+       /* stop rx if packet error rate is high */
+       if (++dev->pkt_cnt > 30) {
+               dev->pkt_cnt = 0;
+               dev->pkt_err = 0;
+       } else {
+               if (state == rx_cleanup)
+                       dev->pkt_err++;
+               if (dev->pkt_err > 20)
+                       set_bit(EVENT_RX_KILL, &dev->flags);
+       }
+
        state = defer_bh(dev, skb, &dev->rxq, state);
 
        if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
                   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
                   "simple");
 
+       /* reset rx error state */
+       dev->pkt_cnt = 0;
+       dev->pkt_err = 0;
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // delay posting reads until we're fully open
        tasklet_schedule (&dev->bh);
        if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (info->tx_fixup) {
                skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
                if (!skb) {
-                       if (netif_msg_tx_err(dev)) {
-                               netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-                               goto drop;
-                       } else {
-                               /* cdc_ncm collected packet; waits for more */
+                       /* packet collected; minidriver waiting for more */
+                       if (info->flags & FLAG_MULTI_PACKET)
                                goto not_drop;
-                       }
+                       netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+                       goto drop;
                }
        }
        length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
                }
        }
 
+       /* restart RX again after disabling due to high error rate */
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // waiting for all pending urbs to complete?
        if (dev->wait) {
                if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
index e1da42a..07a4af0 100644 (file)
@@ -426,12 +426,13 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
         * not being freed before one RCU grace period.
         */
        RCU_INIT_POINTER(priv->peer, NULL);
-
-       priv = netdev_priv(peer);
-       RCU_INIT_POINTER(priv->peer, NULL);
-
        unregister_netdevice_queue(dev, head);
-       unregister_netdevice_queue(peer, head);
+
+       if (peer) {
+               priv = netdev_priv(peer);
+               RCU_INIT_POINTER(priv->peer, NULL);
+               unregister_netdevice_queue(peer, head);
+       }
 }
 
 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
index 381a2d8..192c91c 100644 (file)
@@ -227,7 +227,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
        skb->len += size;
        skb->truesize += PAGE_SIZE;
        skb_shinfo(skb)->nr_frags++;
-       skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
+       skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        *len -= size;
 }
 
@@ -387,18 +387,16 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-               unsigned short gso_type = 0;
-
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
-                       gso_type = SKB_GSO_TCPV4;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-                       gso_type = SKB_GSO_UDP;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
-                       gso_type = SKB_GSO_TCPV6;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        net_warn_ratelimited("%s: bad gso type %u.\n",
@@ -407,7 +405,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                }
 
                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
-                       gso_type |= SKB_GSO_TCP_ECN;
+                       skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
@@ -415,7 +413,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                        goto frame_err;
                }
 
-               skb_shinfo(skb)->gso_type |= gso_type;
                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
index b1c90f8..ffb97b2 100644 (file)
@@ -150,8 +150,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
        if (ret & 1) { /* Link is up. */
                netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
                            adapter->link_speed);
-               if (!netif_carrier_ok(adapter->netdev))
-                       netif_carrier_on(adapter->netdev);
+               netif_carrier_on(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -160,8 +159,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
                }
        } else {
                netdev_info(adapter->netdev, "NIC Link is Down\n");
-               if (netif_carrier_ok(adapter->netdev))
-                       netif_carrier_off(adapter->netdev);
+               netif_carrier_off(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3060,6 +3058,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
        netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+       netif_carrier_off(netdev);
        err = register_netdev(netdev);
 
        if (err) {
index 72485b9..9d70421 100644 (file)
@@ -393,7 +393,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 }
 
 /* Delete entry (via netlink) */
-static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+                           struct net_device *dev,
                            const unsigned char *addr)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
index 8077e6e..0b60295 100644 (file)
@@ -1346,7 +1346,6 @@ EXPORT_SYMBOL(i2400m_unknown_barker);
 int i2400m_rx_setup(struct i2400m *i2400m)
 {
        int result = 0;
-       struct device *dev = i2400m_dev(i2400m);
 
        i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
        if (i2400m->rx_reorder) {
index ab363f3..a78afa9 100644 (file)
@@ -1613,6 +1613,10 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
        ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
 
        ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
+       if (WARN_ON(ee_mode < 0)) {
+               ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+               return;
+       }
 
        /* completed NF calibration, test threshold */
        nf = ath5k_hw_read_measured_noise_floor(ah);
index 4084b10..e2d8b2c 100644 (file)
@@ -985,6 +985,8 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
                return;
 
        ee_mode = ath5k_eeprom_mode_from_channel(channel);
+       if (WARN_ON(ee_mode < 0))
+               return;
 
        /* Adjust power delta for channel 14 */
        if (channel->center_freq == 2484)
index 4225cca..752ffc4 100644 (file)
@@ -427,6 +427,30 @@ static bool ath6kl_is_tx_pending(struct ath6kl *ar)
        return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
 }
 
+static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif,
+                                             bool enable)
+{
+       int err;
+
+       if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+               return;
+
+       if (vif->nw_type != INFRA_NETWORK)
+               return;
+
+       if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+                     vif->ar->fw_capabilities))
+               return;
+
+       ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+                  enable ? "enable" : "disable");
+
+       err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+                                              vif->fw_vif_idx, enable);
+       if (err)
+               ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+                          enable ? "enable" : "disable", err);
+}
 
 static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                                   struct cfg80211_connect_params *sme)
@@ -616,13 +640,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                                        vif->req_bssid, vif->ch_hint,
                                        ar->connect_ctrl_flags, nw_subtype);
 
-       /* disable background scan if period is 0 */
-       if (sme->bg_scan_period == 0)
+       if (sme->bg_scan_period == 0) {
+               /* disable background scan if period is 0 */
                sme->bg_scan_period = 0xffff;
-
-       /* configure default value if not specified */
-       if (sme->bg_scan_period == -1)
+       } else if (sme->bg_scan_period == -1) {
+               /* configure default value if not specified */
                sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
+       }
 
        ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
                                  sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
@@ -767,7 +791,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
                           nw_type & ADHOC_CREATOR ? "creator" : "joiner");
                cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(ar->wiphy, bss);
                return;
        }
 
@@ -778,7 +802,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
                                        assoc_req_ie, assoc_req_len,
                                        assoc_resp_ie, assoc_resp_len,
                                        WLAN_STATUS_SUCCESS, GFP_KERNEL);
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(ar->wiphy, bss);
        } else if (vif->sme_state == SME_CONNECTED) {
                /* inform roam event to cfg80211 */
                cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
@@ -1454,10 +1478,10 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
                return -EIO;
 
        if (pmgmt) {
-               ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+               ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
                mode.pwr_mode = REC_POWER;
        } else {
-               ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+               ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
                mode.pwr_mode = MAX_PERF_POWER;
        }
 
@@ -1509,7 +1533,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
        list_del(&vif->list);
        spin_unlock_bh(&ar->list_lock);
 
-       ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+       ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
 
        ath6kl_cfg80211_vif_cleanup(vif);
 
@@ -1559,17 +1583,13 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
 set_iface_type:
        switch (type) {
        case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
                vif->next_mode = INFRA_NETWORK;
                break;
        case NL80211_IFTYPE_ADHOC:
                vif->next_mode = ADHOC_NETWORK;
                break;
        case NL80211_IFTYPE_AP:
-               vif->next_mode = AP_NETWORK;
-               break;
-       case NL80211_IFTYPE_P2P_CLIENT:
-               vif->next_mode = INFRA_NETWORK;
-               break;
        case NL80211_IFTYPE_P2P_GO:
                vif->next_mode = AP_NETWORK;
                break;
@@ -1778,14 +1798,14 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
 
        if (vif->target_stats.rx_byte) {
                sinfo->rx_bytes = vif->target_stats.rx_byte;
-               sinfo->filled |= STATION_INFO_RX_BYTES;
+               sinfo->filled |= STATION_INFO_RX_BYTES64;
                sinfo->rx_packets = vif->target_stats.rx_pkt;
                sinfo->filled |= STATION_INFO_RX_PACKETS;
        }
 
        if (vif->target_stats.tx_byte) {
                sinfo->tx_bytes = vif->target_stats.tx_byte;
-               sinfo->filled |= STATION_INFO_TX_BYTES;
+               sinfo->filled |= STATION_INFO_TX_BYTES64;
                sinfo->tx_packets = vif->target_stats.tx_pkt;
                sinfo->filled |= STATION_INFO_TX_PACKETS;
        }
@@ -2673,30 +2693,6 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
        return 0;
 }
 
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
-{
-       int err;
-
-       if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
-               return;
-
-       if (vif->nw_type != INFRA_NETWORK)
-               return;
-
-       if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
-                     vif->ar->fw_capabilities))
-               return;
-
-       ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
-                  enable ? "enable" : "disable");
-
-       err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
-                                              vif->fw_vif_idx, enable);
-       if (err)
-               ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
-                          enable ? "enable" : "disable", err);
-}
-
 static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
                                u8 *rsn_capab)
 {
@@ -2776,9 +2772,11 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
        ar->ap_mode_bkey.valid = false;
 
-       /* TODO:
-        * info->interval
-        */
+       ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx,
+                                                info->beacon_interval);
+
+       if (ret)
+               ath6kl_warn("Failed to set beacon interval: %d\n", ret);
 
        ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
                                         info->dtim_period);
@@ -3557,6 +3555,37 @@ static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
        return 0;
 }
 
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
+{
+       static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       bool discon_issued;
+
+       netif_stop_queue(vif->ndev);
+
+       clear_bit(WLAN_ENABLED, &vif->flags);
+
+       if (wmi_ready) {
+               discon_issued = test_bit(CONNECTED, &vif->flags) ||
+                               test_bit(CONNECT_PEND, &vif->flags);
+               ath6kl_disconnect(vif);
+               del_timer(&vif->disconnect_timer);
+
+               if (discon_issued)
+                       ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+                                               (vif->nw_type & AP_NETWORK) ?
+                                               bcast_mac : vif->bssid,
+                                               0, NULL, 0);
+       }
+
+       if (vif->scan_req) {
+               cfg80211_scan_done(vif->scan_req, true);
+               vif->scan_req = NULL;
+       }
+
+       /* need to clean up enhanced bmiss detection fw state */
+       ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+}
+
 void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
 {
        struct ath6kl *ar = vif->ar;
index e5e70f3..b59becd 100644 (file)
@@ -61,7 +61,5 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
 
 struct ath6kl *ath6kl_cfg80211_create(void);
 void ath6kl_cfg80211_destroy(struct ath6kl *ar);
-/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
 
 #endif /* ATH6KL_CFG80211_H */
index 189d8fa..61b2f98 100644 (file)
@@ -940,7 +940,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
                         bool wait_fot_compltn, bool cold_reset);
 void ath6kl_init_control_info(struct ath6kl_vif *vif);
 struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
 int ath6kl_init_hw_start(struct ath6kl *ar);
 int ath6kl_init_hw_stop(struct ath6kl *ar);
 int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
index ba6bd49..2813901 100644 (file)
@@ -509,9 +509,7 @@ static void destroy_htc_txctrl_packet(struct htc_packet *packet)
 {
        struct sk_buff *skb;
        skb = packet->skb;
-       if (skb != NULL)
-               dev_kfree_skb(skb);
-
+       dev_kfree_skb(skb);
        kfree(packet);
 }
 
@@ -969,6 +967,22 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
        u16 payload_len;
        int status = 0;
 
+       /*
+        * ar->htc_target can be NULL due to a race condition that can occur
+        * during driver initialization(we do 'ath6kl_hif_power_on' before
+        * initializing 'ar->htc_target' via 'ath6kl_htc_create').
+        * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
+        * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
+        * Thus the possibility of ar->htc_target being NULL
+        * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
+        */
+       if (WARN_ON_ONCE(!target)) {
+               ath6kl_err("Target not yet initialized\n");
+               status = -EINVAL;
+               goto free_skb;
+       }
+
+
        netdata = skb->data;
        netlen = skb->len;
 
@@ -1054,6 +1068,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
 
                dev_kfree_skb(skb);
                skb = NULL;
+
                goto free_skb;
        }
 
@@ -1089,8 +1104,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
        skb = NULL;
 
 free_skb:
-       if (skb != NULL)
-               dev_kfree_skb(skb);
+       dev_kfree_skb(skb);
 
        return status;
 
@@ -1184,7 +1198,7 @@ static void reset_endpoint_states(struct htc_target *target)
                INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
                INIT_LIST_HEAD(&ep->rx_bufq);
                ep->target = target;
-               ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
+               ep->pipe.tx_credit_flow_enabled = true;
        }
 }
 
index f21fa32..5d434cf 100644 (file)
@@ -1715,38 +1715,6 @@ void ath6kl_init_hw_restart(struct ath6kl *ar)
        }
 }
 
-/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
-{
-       static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-       bool discon_issued;
-
-       netif_stop_queue(vif->ndev);
-
-       clear_bit(WLAN_ENABLED, &vif->flags);
-
-       if (wmi_ready) {
-               discon_issued = test_bit(CONNECTED, &vif->flags) ||
-                               test_bit(CONNECT_PEND, &vif->flags);
-               ath6kl_disconnect(vif);
-               del_timer(&vif->disconnect_timer);
-
-               if (discon_issued)
-                       ath6kl_disconnect_event(vif, DISCONNECT_CMD,
-                                               (vif->nw_type & AP_NETWORK) ?
-                                               bcast_mac : vif->bssid,
-                                               0, NULL, 0);
-       }
-
-       if (vif->scan_req) {
-               cfg80211_scan_done(vif->scan_req, true);
-               vif->scan_req = NULL;
-       }
-
-       /* need to clean up enhanced bmiss detection fw state */
-       ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
-}
-
 void ath6kl_stop_txrx(struct ath6kl *ar)
 {
        struct ath6kl_vif *vif, *tmp_vif;
@@ -1766,7 +1734,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
        list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
                list_del(&vif->list);
                spin_unlock_bh(&ar->list_lock);
-               ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+               ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
                rtnl_lock();
                ath6kl_cfg80211_vif_cleanup(vif);
                rtnl_unlock();
@@ -1801,8 +1769,6 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
                   "attempting to reset target on instance destroy\n");
        ath6kl_reset_device(ar, ar->target_type, true, true);
 
-       clear_bit(WLAN_ENABLED, &ar->flag);
-
        up(&ar->sem);
 }
 EXPORT_SYMBOL(ath6kl_stop_txrx);
index 62bcc0d..5fcd342 100644 (file)
@@ -159,10 +159,8 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
 
 static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
 {
-       if (urb_context->skb != NULL) {
-               dev_kfree_skb(urb_context->skb);
-               urb_context->skb = NULL;
-       }
+       dev_kfree_skb(urb_context->skb);
+       urb_context->skb = NULL;
 
        ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
 }
index 998f8b0..d76b5bd 100644 (file)
@@ -751,6 +751,23 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
                                   NO_SYNC_WMIFLAG);
 }
 
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+                                      u32 beacon_intvl)
+{
+       struct sk_buff *skb;
+       struct set_beacon_int_cmd *cmd;
+
+       skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct set_beacon_int_cmd *) skb->data;
+
+       cmd->beacon_intvl = cpu_to_le32(beacon_intvl);
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+                                  WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG);
+}
+
 int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
 {
        struct sk_buff *skb;
@@ -1108,7 +1125,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
        kfree(mgmt);
        if (bss == NULL)
                return -ENOMEM;
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(ar->wiphy, bss);
 
        /*
         * Firmware doesn't return any event when scheduled scan has
@@ -2480,16 +2497,11 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
 
 free_cmd_skb:
        /* free up any resources left over (possibly due to an error) */
-       if (skb)
-               dev_kfree_skb(skb);
+       dev_kfree_skb(skb);
 
 free_data_skb:
-       for (index = 0; index < num_pri_streams; index++) {
-               if (data_sync_bufs[index].skb != NULL) {
-                       dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
-                                     skb);
-               }
-       }
+       for (index = 0; index < num_pri_streams; index++)
+               dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb);
 
        return ret;
 }
index 98b1755..b5f2265 100644 (file)
@@ -1660,6 +1660,10 @@ struct roam_ctrl_cmd {
        u8 roam_ctrl;
 } __packed;
 
+struct set_beacon_int_cmd {
+       __le32 beacon_intvl;
+} __packed;
+
 struct set_dtim_cmd {
        __le32 dtim_period;
 } __packed;
@@ -2649,6 +2653,8 @@ int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
 int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
 int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
 int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+                                      u32 beacon_interval);
 int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
 int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
 int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
index 7647ed6..17507dc 100644 (file)
@@ -58,6 +58,7 @@ config ATH9K_DEBUGFS
        bool "Atheros ath9k debugging"
        depends on ATH9K
        select MAC80211_DEBUGFS
+       select RELAY
        ---help---
          Say Y, if you need access to ath9k's statistics for
          interrupts, rate control, etc.
index b2d6c18..a56b241 100644 (file)
@@ -319,6 +319,8 @@ struct ath_rx {
        struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 
        struct sk_buff *frag;
+
+       u32 ampdu_ref;
 };
 
 int ath_startrecv(struct ath_softc *sc);
@@ -387,6 +389,7 @@ struct ath_beacon_config {
        u16 bmiss_timeout;
        u8 dtim_count;
        bool enable_beacon;
+       bool ibss_creator;
 };
 
 struct ath_beacon {
@@ -754,6 +757,7 @@ struct ath_softc {
        /* relay(fs) channel for spectral scan */
        struct rchan *rfs_chan_spec_scan;
        enum spectral_mode spectral_mode;
+       struct ath_spec_scan spec_config;
        int scanning;
 
 #ifdef CONFIG_PM_SLEEP
@@ -863,31 +867,31 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
  * interface.
  */
 enum ath_fft_sample_type {
-       ATH_FFT_SAMPLE_HT20 = 0,
+       ATH_FFT_SAMPLE_HT20 = 1,
 };
 
 struct fft_sample_tlv {
        u8 type;        /* see ath_fft_sample */
-       u16 length;
+       __be16 length;
        /* type dependent data follows */
 } __packed;
 
 struct fft_sample_ht20 {
        struct fft_sample_tlv tlv;
 
-       u8 __alignment;
+       u8 max_exp;
 
-       u16 freq;
+       __be16 freq;
        s8 rssi;
        s8 noise;
 
-       u16 max_magnitude;
+       __be16 max_magnitude;
        u8 max_index;
        u8 bitmap_weight;
 
-       u64 tsf;
+       __be64 tsf;
 
-       u16 data[SPECTRAL_HT20_NUM_BINS];
+       u8 data[SPECTRAL_HT20_NUM_BINS];
 } __packed;
 
 void ath9k_tasklet(unsigned long data);
index dd37719..5f05c26 100644 (file)
@@ -407,12 +407,17 @@ void ath9k_beacon_tasklet(unsigned long data)
        }
 }
 
-static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
+                             u32 intval, bool reset_tsf)
 {
        struct ath_hw *ah = sc->sc_ah;
 
        ath9k_hw_disable_interrupts(ah);
-       ath9k_hw_reset_tsf(ah);
+       if (reset_tsf)
+               ath9k_hw_reset_tsf(ah);
        ath9k_beaconq_config(sc);
        ath9k_hw_beaconinit(ah, nexttbtt, intval);
        sc->beacon.bmisscnt = 0;
@@ -442,10 +447,12 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
        else
                ah->imask &= ~ATH9K_INT_SWBA;
 
-       ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n",
+       ath_dbg(common, BEACON,
+               "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+               (conf->enable_beacon) ? "Enable" : "Disable",
                nexttbtt, intval, conf->beacon_interval);
 
-       ath9k_beacon_init(sc, nexttbtt, intval);
+       ath9k_beacon_init(sc, nexttbtt, intval, true);
 }
 
 /*
@@ -586,17 +593,45 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
        ath9k_reset_beacon_status(sc);
 
        intval = TU_TO_USEC(conf->beacon_interval);
-       nexttbtt = intval;
+
+       if (conf->ibss_creator) {
+               nexttbtt = intval;
+       } else {
+               u32 tbtt, offset, tsftu;
+               u64 tsf;
+
+               /*
+                * Pull nexttbtt forward to reflect the current
+                * sync'd TSF.
+                */
+               tsf = ath9k_hw_gettsf64(ah);
+               tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+               offset = tsftu % conf->beacon_interval;
+               tbtt = tsftu - offset;
+               if (offset)
+                       tbtt += conf->beacon_interval;
+
+               nexttbtt = TU_TO_USEC(tbtt);
+       }
 
        if (conf->enable_beacon)
                ah->imask |= ATH9K_INT_SWBA;
        else
                ah->imask &= ~ATH9K_INT_SWBA;
 
-       ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n",
+       ath_dbg(common, BEACON,
+               "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+               (conf->enable_beacon) ? "Enable" : "Disable",
                nexttbtt, intval, conf->beacon_interval);
 
-       ath9k_beacon_init(sc, nexttbtt, intval);
+       ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+
+       /*
+        * Set the global 'beacon has been configured' flag for the
+        * joiner case in IBSS mode.
+        */
+       if (!conf->ibss_creator && conf->enable_beacon)
+               set_bit(SC_OP_BEACONS, &sc->sc_flags);
 }
 
 bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
@@ -639,6 +674,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
        cur_conf->dtim_period = bss_conf->dtim_period;
        cur_conf->listen_interval = 1;
        cur_conf->dtim_count = 1;
+       cur_conf->ibss_creator = bss_conf->ibss_creator;
        cur_conf->bmiss_timeout =
                ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
 
@@ -666,34 +702,59 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
 {
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       unsigned long flags;
+       bool skip_beacon = false;
 
        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
                ath9k_cache_beacon_config(sc, bss_conf);
                ath9k_set_beacon(sc);
                set_bit(SC_OP_BEACONS, &sc->sc_flags);
-       } else {
-               /*
-                * Take care of multiple interfaces when
-                * enabling/disabling SWBA.
-                */
-               if (changed & BSS_CHANGED_BEACON_ENABLED) {
-                       if (!bss_conf->enable_beacon &&
-                           (sc->nbcnvifs <= 1)) {
-                               cur_conf->enable_beacon = false;
-                       } else if (bss_conf->enable_beacon) {
-                               cur_conf->enable_beacon = true;
-                               ath9k_cache_beacon_config(sc, bss_conf);
-                       }
+               return;
+
+       }
+
+       /*
+        * Take care of multiple interfaces when
+        * enabling/disabling SWBA.
+        */
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
+               if (!bss_conf->enable_beacon &&
+                   (sc->nbcnvifs <= 1)) {
+                       cur_conf->enable_beacon = false;
+               } else if (bss_conf->enable_beacon) {
+                       cur_conf->enable_beacon = true;
+                       ath9k_cache_beacon_config(sc, bss_conf);
                }
+       }
 
-               if (cur_conf->beacon_interval) {
+       /*
+        * Configure the HW beacon registers only when we have a valid
+        * beacon interval.
+        */
+       if (cur_conf->beacon_interval) {
+               /*
+                * If we are joining an existing IBSS network, start beaconing
+                * only after a TSF-sync has taken place. Ensure that this
+                * happens by setting the appropriate flags.
+                */
+               if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator &&
+                   bss_conf->enable_beacon) {
+                       spin_lock_irqsave(&sc->sc_pm_lock, flags);
+                       sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+                       spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+                       skip_beacon = true;
+               } else {
                        ath9k_set_beacon(sc);
-
-                       if (cur_conf->enable_beacon)
-                               set_bit(SC_OP_BEACONS, &sc->sc_flags);
-                       else
-                               clear_bit(SC_OP_BEACONS, &sc->sc_flags);
                }
+
+               /*
+                * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+                * here, it is done in ath9k_beacon_config_adhoc().
+                */
+               if (cur_conf->enable_beacon && !skip_beacon)
+                       set_bit(SC_OP_BEACONS, &sc->sc_flags);
+               else
+                       clear_bit(SC_OP_BEACONS, &sc->sc_flags);
        }
 }
 
index 6c5d313..3714b97 100644 (file)
@@ -895,6 +895,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
        RXS_ERR("RX-Bytes-All", rx_bytes_all);
        RXS_ERR("RX-Beacons", rx_beacons);
        RXS_ERR("RX-Frags", rx_frags);
+       RXS_ERR("RX-Spectral", rx_spectral);
 
        if (len > size)
                len = size;
@@ -1035,6 +1036,182 @@ static const struct file_operations fops_spec_scan_ctl = {
        .llseek = default_llseek,
 };
 
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+                                              char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+                                               const char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       unsigned long val;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < 0 || val > 1)
+               return -EINVAL;
+
+       sc->spec_config.short_repeat = val;
+       return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+       .read = read_file_spectral_short_repeat,
+       .write = write_file_spectral_short_repeat,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", sc->spec_config.count);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       unsigned long val;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < 0 || val > 255)
+               return -EINVAL;
+
+       sc->spec_config.count = val;
+       return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+       .read = read_file_spectral_count,
+       .write = write_file_spectral_count,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_period(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", sc->spec_config.period);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+                                         const char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       unsigned long val;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < 0 || val > 255)
+               return -EINVAL;
+
+       sc->spec_config.period = val;
+       return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+       .read = read_file_spectral_period,
+       .write = write_file_spectral_period,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+                                            char __user *user_buf,
+                                            size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+                                             const char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       unsigned long val;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < 0 || val > 15)
+               return -EINVAL;
+
+       sc->spec_config.fft_period = val;
+       return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+       .read = read_file_spectral_fft_period,
+       .write = write_file_spectral_fft_period,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static struct dentry *create_buf_file_handler(const char *filename,
                                              struct dentry *parent,
                                              umode_t mode,
@@ -1059,11 +1236,13 @@ static int remove_buf_file_handler(struct dentry *dentry)
 void ath_debug_send_fft_sample(struct ath_softc *sc,
                               struct fft_sample_tlv *fft_sample_tlv)
 {
+       int length;
        if (!sc->rfs_chan_spec_scan)
                return;
 
-       relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv,
-                   fft_sample_tlv->length + sizeof(*fft_sample_tlv));
+       length = __be16_to_cpu(fft_sample_tlv->length) +
+                sizeof(*fft_sample_tlv);
+       relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
 }
 
 static struct rchan_callbacks rfs_spec_scan_cb = {
@@ -1893,6 +2072,16 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
                            sc->debug.debugfs_phy, sc,
                            &fops_spec_scan_ctl);
+       debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc,
+                           &fops_spectral_short_repeat);
+       debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_spectral_count);
+       debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_spectral_period);
+       debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc,
+                           &fops_spectral_fft_period);
 
 #ifdef CONFIG_ATH9K_MAC_DEBUG
        debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
index a22c0d7..410d6d8 100644 (file)
@@ -219,6 +219,7 @@ struct ath_tx_stats {
  * @rx_too_many_frags_err:  Frames dropped due to too-many-frags received.
  * @rx_beacons:  No. of beacons received.
  * @rx_frags:  No. of rx-fragments received.
+ * @rx_spectral: No. of spectral packets received.
  */
 struct ath_rx_stats {
        u32 rx_pkts_all;
@@ -237,6 +238,7 @@ struct ath_rx_stats {
        u32 rx_too_many_frags_err;
        u32 rx_beacons;
        u32 rx_frags;
+       u32 rx_spectral;
 };
 
 struct ath_stats {
index b6a5a08..3ad1fd0 100644 (file)
@@ -1196,20 +1196,17 @@ void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
 
 int ath9k_rx_init(struct ath9k_htc_priv *priv)
 {
-       struct ath_hw *ah = priv->ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ath9k_htc_rxbuf *rxbuf;
        int i = 0;
 
        INIT_LIST_HEAD(&priv->rx.rxbuf);
        spin_lock_init(&priv->rx.rxbuflock);
 
        for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
-               rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
-               if (rxbuf == NULL) {
-                       ath_err(common, "Unable to allocate RX buffers\n");
+               struct ath9k_htc_rxbuf *rxbuf =
+                       kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+               if (rxbuf == NULL)
                        goto err;
-               }
+
                list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
        }
 
index 42cf3c7..2a2ae40 100644 (file)
@@ -2981,13 +2981,8 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
        struct ath_gen_timer *timer;
 
        timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
-
-       if (timer == NULL) {
-               ath_err(ath9k_hw_common(ah),
-                       "Failed to allocate memory for hw timer[%d]\n",
-                       timer_index);
+       if (timer == NULL)
                return NULL;
-       }
 
        /* allocate a hardware generic timer slot */
        timer_table->timers[timer_index] = timer;
index 4b1abc7..af932c9 100644 (file)
@@ -497,6 +497,13 @@ static void ath9k_init_misc(struct ath_softc *sc)
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
+
+       sc->spec_config.enabled = 0;
+       sc->spec_config.short_repeat = true;
+       sc->spec_config.count = 8;
+       sc->spec_config.endless = false;
+       sc->spec_config.period = 0xFF;
+       sc->spec_config.fft_period = 0xF;
 }
 
 static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -915,7 +922,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 
        ath9k_eeprom_release(sc);
 
-       if (sc->rfs_chan_spec_scan) {
+       if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
                relay_close(sc->rfs_chan_spec_scan);
                sc->rfs_chan_spec_scan = NULL;
        }
index b42be91..811007e 100644 (file)
@@ -605,13 +605,13 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                 * reported, then decryption and MIC errors are irrelevant,
                 * the frame is going to be dropped either way
                 */
-               if (ads.ds_rxstatus8 & AR_CRCErr)
-                       rs->rs_status |= ATH9K_RXERR_CRC;
-               else if (ads.ds_rxstatus8 & AR_PHYErr) {
+               if (ads.ds_rxstatus8 & AR_PHYErr) {
                        rs->rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        rs->rs_phyerr = phyerr;
-               } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+               } else if (ads.ds_rxstatus8 & AR_CRCErr)
+                       rs->rs_status |= ATH9K_RXERR_CRC;
+               else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
index 4b72b66..6e66f9c 100644 (file)
@@ -320,28 +320,25 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
                            struct ieee80211_vif *vif)
 {
        struct ath_node *an;
-       u8 density;
        an = (struct ath_node *)sta->drv_priv;
 
        an->sc = sc;
        an->sta = sta;
        an->vif = vif;
 
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
-               ath_tx_node_init(sc, an);
+       ath_tx_node_init(sc, an);
+
+       if (sta->ht_cap.ht_supported) {
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                     sta->ht_cap.ampdu_factor);
-               density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
-               an->mpdudensity = density;
+               an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
        }
 }
 
 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
-
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
-               ath_tx_node_cleanup(sc, an);
+       ath_tx_node_cleanup(sc, an);
 }
 
 void ath9k_tasklet(unsigned long data)
@@ -1099,45 +1096,34 @@ int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ath_spec_scan param;
 
        if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
                ath_err(common, "spectrum analyzer not implemented on this hardware\n");
                return -1;
        }
 
-       /* NOTE: this will generate a few samples ...
-        *
-        * TODO: review default parameters, and/or define an interface to set
-        * them.
-        */
-       param.enabled = 1;
-       param.short_repeat = true;
-       param.count = 8;
-       param.endless = false;
-       param.period = 0xFF;
-       param.fft_period = 0xF;
-
        switch (spectral_mode) {
        case SPECTRAL_DISABLED:
-               param.enabled = 0;
+               sc->spec_config.enabled = 0;
                break;
        case SPECTRAL_BACKGROUND:
                /* send endless samples.
                 * TODO: is this really useful for "background"?
                 */
-               param.endless = 1;
+               sc->spec_config.endless = 1;
+               sc->spec_config.enabled = 1;
                break;
        case SPECTRAL_CHANSCAN:
-               break;
        case SPECTRAL_MANUAL:
+               sc->spec_config.endless = 0;
+               sc->spec_config.enabled = 1;
                break;
        default:
                return -1;
        }
 
        ath9k_ps_wakeup(sc);
-       ath9k_hw_ops(ah)->spectral_scan_config(ah, &param);
+       ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
        ath9k_ps_restore(sc);
 
        sc->spectral_mode = spectral_mode;
index d207433..815bee2 100644 (file)
@@ -474,8 +474,6 @@ void ath_mci_cleanup(struct ath_softc *sc)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
-       struct ath_mci_coex *mci = &sc->mci_coex;
-       struct ath_mci_buf *buf = &mci->sched_buf;
 
        ar9003_mci_cleanup(ah);
 
index 714558d..96ac433 100644 (file)
@@ -1204,7 +1204,7 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
                        caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
                else if (sta->ht_cap.mcs.rx_mask[1])
                        caps |= WLAN_RC_DS_FLAG;
-               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+               if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
                        caps |= WLAN_RC_40_FLAG;
                        if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
                                caps |= WLAN_RC_SGI_FLAG;
@@ -1452,17 +1452,7 @@ static void ath_rate_free(void *priv)
 
 static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 {
-       struct ath_softc *sc = priv;
-       struct ath_rate_priv *rate_priv;
-
-       rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
-       if (!rate_priv) {
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Unable to allocate private rc structure\n");
-               return NULL;
-       }
-
-       return rate_priv;
+       return kzalloc(sizeof(struct ath_rate_priv), gfp);
 }
 
 static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
index d7c129b..ee156e5 100644 (file)
@@ -533,7 +533,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
        if (sc->ps_flags & PS_BEACON_SYNC) {
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_dbg(common, PS,
-                       "Reconfigure Beacon timers based on timestamp from the AP\n");
+                       "Reconfigure beacon timers based on synchronized timestamp\n");
                ath9k_set_beacon(sc);
        }
 
@@ -1016,18 +1016,20 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
                rxs->flag &= ~RX_FLAG_DECRYPTED;
 }
 
+#ifdef CONFIG_ATH9K_DEBUGFS
 static s8 fix_rssi_inv_only(u8 rssi_val)
 {
        if (rssi_val == 128)
                rssi_val = 0;
        return (s8) rssi_val;
 }
+#endif
 
-
-static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
-                           struct ath_rx_status *rs, u64 tsf)
+/* returns 1 if this was a spectral frame, even if not handled. */
+static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+                          struct ath_rx_status *rs, u64 tsf)
 {
-#ifdef CONFIG_ATH_DEBUG
+#ifdef CONFIG_ATH9K_DEBUGFS
        struct ath_hw *ah = sc->sc_ah;
        u8 bins[SPECTRAL_HT20_NUM_BINS];
        u8 *vdata = (u8 *)hdr;
@@ -1035,7 +1037,8 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
        struct ath_radar_info *radar_info;
        struct ath_ht20_mag_info *mag_info;
        int len = rs->rs_datalen;
-       int i, dc_pos;
+       int dc_pos;
+       u16 length, max_magnitude;
 
        /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
         * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -1044,7 +1047,14 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
        if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
            rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
            rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
-               return;
+               return 0;
+
+       /* check if spectral scan bit is set. This does not have to be checked
+        * if received through a SPECTRAL phy error, but shouldn't hurt.
+        */
+       radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+       if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+               return 0;
 
        /* Variation in the data length is possible and will be fixed later.
         * Note that we only support HT20 for now.
@@ -1053,19 +1063,13 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
         */
        if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
            (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
-               return;
-
-       /* check if spectral scan bit is set. This does not have to be checked
-        * if received through a SPECTRAL phy error, but shouldn't hurt.
-        */
-       radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
-       if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
-               return;
+               return 1;
 
        fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
-       fft_sample.tlv.length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
+       length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
+       fft_sample.tlv.length = __cpu_to_be16(length);
 
-       fft_sample.freq = ah->curchan->chan->center_freq;
+       fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
        fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
        fft_sample.noise = ah->noise;
 
@@ -1093,7 +1097,7 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
                memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
                break;
        default:
-               return;
+               return 1;
        }
 
        /* DC value (value in the middle) is the blind spot of the spectral
@@ -1105,19 +1109,41 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
        /* mag data is at the end of the frame, in front of radar_info */
        mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
 
-       /* Apply exponent and grab further auxiliary information. */
-       for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++)
-               fft_sample.data[i] = bins[i] << mag_info->max_exp;
+       /* copy raw bins without scaling them */
+       memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
+       fft_sample.max_exp = mag_info->max_exp & 0xf;
 
-       fft_sample.max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+       max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+       fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
        fft_sample.max_index = spectral_max_index(mag_info->all_bins);
        fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
-       fft_sample.tsf = tsf;
+       fft_sample.tsf = __cpu_to_be64(tsf);
 
        ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+       return 1;
+#else
+       return 0;
 #endif
 }
 
+static void ath9k_apply_ampdu_details(struct ath_softc *sc,
+       struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
+{
+       if (rs->rs_isaggr) {
+               rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+
+               rxs->ampdu_reference = sc->rx.ampdu_ref;
+
+               if (!rs->rs_moreaggr) {
+                       rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
+                       sc->rx.ampdu_ref++;
+               }
+
+               if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
+                       rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
+       }
+}
+
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
        struct ath_buf *bf;
@@ -1202,8 +1228,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
                        rxs->mactime += 0x100000000ULL;
 
-               if ((rs.rs_status & ATH9K_RXERR_PHY))
-                       ath_process_fft(sc, hdr, &rs, rxs->mactime);
+               if (rs.rs_status & ATH9K_RXERR_PHY) {
+                       if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
+                               RX_STAT_INC(rx_spectral);
+                               goto requeue_drop_frag;
+                       }
+               }
 
                retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
                                                 rxs, &decrypt_error);
@@ -1320,6 +1350,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
                        ath_ant_comb_scan(sc, &rs);
 
+               ath9k_apply_ampdu_details(sc, &rs, rxs);
+
                ieee80211_rx(hw, skb);
 
 requeue_drop_frag:
index feacaaf..89a6441 100644 (file)
@@ -1233,7 +1233,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
         * in HT IBSS when a beacon with HT-info is received after the station
         * has already been added.
         */
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+       if (sta->ht_cap.ht_supported) {
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                     sta->ht_cap.ampdu_factor);
                density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
@@ -1904,8 +1904,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
        struct ath_buf *bf;
        u8 tidno;
 
-       if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
-               ieee80211_is_data_qos(hdr->frame_control)) {
+       if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
                tidno = ieee80211_get_qos_ctl(hdr)[0] &
                        IEEE80211_QOS_CTL_TID_MASK;
                tid = ATH_AN_2_TID(txctl->an, tidno);
index ef82751..f293b3f 100644 (file)
@@ -1853,7 +1853,7 @@ void *carl9170_alloc(size_t priv_size)
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                     IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_PS_NULLFUNC_STACK |
-                    IEEE80211_HW_NEED_DTIM_PERIOD |
+                    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
                     IEEE80211_HW_SIGNAL_DBM;
 
        if (!modparam_noht) {
index 116f4e8..9ecc196 100644 (file)
@@ -204,7 +204,6 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                break;
        default:
                return -EOPNOTSUPP;
-
        }
 
        /* FW don't support scan after connection attempt */
@@ -228,8 +227,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                }
                /* 0-based channel indexes */
                cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
-               wil_dbg(wil, "Scan for ch %d  : %d MHz\n", ch,
-                       request->channels[i]->center_freq);
+               wil_dbg_misc(wil, "Scan for ch %d  : %d MHz\n", ch,
+                            request->channels[i]->center_freq);
        }
 
        return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
@@ -342,7 +341,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        }
 
  out:
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(wiphy, bss);
 
        return rc;
 }
@@ -425,8 +424,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                return -EINVAL;
        }
 
-       wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
-               channel->center_freq, info->privacy ? "secure" : "open");
+       wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+                    channel->center_freq, info->privacy ? "secure" : "open");
        print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
                             info->ssid, info->ssid_len);
 
index 38049da..dc97e7b 100644 (file)
@@ -38,7 +38,9 @@
 #define WIL6210_IMC_RX         BIT_DMA_EP_RX_ICR_RX_DONE
 #define WIL6210_IMC_TX         (BIT_DMA_EP_TX_ICR_TX_DONE | \
                                BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
-#define WIL6210_IMC_MISC       (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT)
+#define WIL6210_IMC_MISC       (ISR_MISC_FW_READY | \
+                                ISR_MISC_MBOX_EVT | \
+                                ISR_MISC_FW_ERROR)
 
 #define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
                                        BIT_DMA_PSEUDO_CAUSE_TX | \
@@ -50,7 +52,6 @@
 
 static inline void wil_icr_clear(u32 x, void __iomem *addr)
 {
-
 }
 #else /* defined(CONFIG_WIL6210_ISR_COR) */
 /* configure to Write-1-to-Clear mode */
@@ -94,7 +95,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
 
 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
 {
-       wil_dbg_IRQ(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "%s()\n", __func__);
 
        iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
                  HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
@@ -125,7 +126,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
 
 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
 {
-       wil_dbg_IRQ(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "%s()\n", __func__);
 
        set_bit(wil_status_irqen, &wil->status);
 
@@ -135,7 +136,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
 
 void wil6210_disable_irq(struct wil6210_priv *wil)
 {
-       wil_dbg_IRQ(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "%s()\n", __func__);
 
        wil6210_mask_irq_tx(wil);
        wil6210_mask_irq_rx(wil);
@@ -145,7 +146,7 @@ void wil6210_disable_irq(struct wil6210_priv *wil)
 
 void wil6210_enable_irq(struct wil6210_priv *wil)
 {
-       wil_dbg_IRQ(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "%s()\n", __func__);
 
        iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
                  offsetof(struct RGF_ICR, ICC));
@@ -167,7 +168,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
                                         HOSTADDR(RGF_DMA_EP_RX_ICR) +
                                         offsetof(struct RGF_ICR, ICR));
 
-       wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr);
+       wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
        if (!isr) {
                wil_err(wil, "spurious IRQ: RX\n");
@@ -177,7 +178,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
        wil6210_mask_irq_rx(wil);
 
        if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
-               wil_dbg_IRQ(wil, "RX done\n");
+               wil_dbg_irq(wil, "RX done\n");
                isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
                wil_rx_handle(wil);
        }
@@ -197,7 +198,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
                                         HOSTADDR(RGF_DMA_EP_TX_ICR) +
                                         offsetof(struct RGF_ICR, ICR));
 
-       wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr);
+       wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
        if (!isr) {
                wil_err(wil, "spurious IRQ: TX\n");
@@ -208,13 +209,13 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
 
        if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
                uint i;
-               wil_dbg_IRQ(wil, "TX done\n");
+               wil_dbg_irq(wil, "TX done\n");
                isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
                for (i = 0; i < 24; i++) {
                        u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
                        if (isr & mask) {
                                isr &= ~mask;
-                               wil_dbg_IRQ(wil, "TX done(%i)\n", i);
+                               wil_dbg_irq(wil, "TX done(%i)\n", i);
                                wil_tx_complete(wil, i);
                        }
                }
@@ -228,6 +229,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
        return IRQ_HANDLED;
 }
 
+static void wil_notify_fw_error(struct wil6210_priv *wil)
+{
+       struct device *dev = &wil_to_ndev(wil)->dev;
+       char *envp[3] = {
+               [0] = "SOURCE=wil6210",
+               [1] = "EVENT=FW_ERROR",
+               [2] = NULL,
+       };
+       kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
@@ -235,7 +247,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
                                         HOSTADDR(RGF_DMA_EP_MISC_ICR) +
                                         offsetof(struct RGF_ICR, ICR));
 
-       wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr);
+       wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
 
        if (!isr) {
                wil_err(wil, "spurious IRQ: MISC\n");
@@ -244,8 +256,15 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 
        wil6210_mask_irq_misc(wil);
 
+       if (isr & ISR_MISC_FW_ERROR) {
+               wil_dbg_irq(wil, "IRQ: Firmware error\n");
+               clear_bit(wil_status_fwready, &wil->status);
+               wil_notify_fw_error(wil);
+               isr &= ~ISR_MISC_FW_ERROR;
+       }
+
        if (isr & ISR_MISC_FW_READY) {
-               wil_dbg_IRQ(wil, "IRQ: FW ready\n");
+               wil_dbg_irq(wil, "IRQ: FW ready\n");
                /**
                 * Actual FW ready indicated by the
                 * WMI_FW_READY_EVENTID
@@ -268,10 +287,10 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        struct wil6210_priv *wil = cookie;
        u32 isr = wil->isr_misc;
 
-       wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr);
+       wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
 
        if (isr & ISR_MISC_MBOX_EVT) {
-               wil_dbg_IRQ(wil, "MBOX event\n");
+               wil_dbg_irq(wil, "MBOX event\n");
                wmi_recv_cmd(wil);
                isr &= ~ISR_MISC_MBOX_EVT;
        }
@@ -293,7 +312,7 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
 
-       wil_dbg_IRQ(wil, "Thread IRQ\n");
+       wil_dbg_irq(wil, "Thread IRQ\n");
        /* Discover real IRQ cause */
        if (wil->isr_misc)
                wil6210_irq_misc_thread(irq, cookie);
@@ -370,6 +389,8 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        if (wil6210_debug_irq_mask(wil, pseudo_cause))
                return IRQ_NONE;
 
+       wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
+
        wil6210_mask_irq_pseudo(wil);
 
        /* Discover real IRQ cause
@@ -401,8 +422,6 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        if (rc != IRQ_WAKE_THREAD)
                wil6210_unmask_irq_pseudo(wil);
 
-       wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause);
-
        return rc;
 }
 
index 95fcd36..761c389 100644 (file)
@@ -64,7 +64,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
        struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *wdev = wil->wdev;
 
-       wil_dbg(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "%s()\n", __func__);
 
        wil_link_off(wil);
        clear_bit(wil_status_fwconnected, &wil->status);
@@ -80,11 +80,13 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
                                        GFP_KERNEL);
                break;
        default:
-               ;
+               break;
        }
 
        for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
                wil_vring_fini_tx(wil, i);
+
+       clear_bit(wil_status_dontscan, &wil->status);
 }
 
 static void wil_disconnect_worker(struct work_struct *work)
@@ -99,7 +101,7 @@ static void wil_connect_timer_fn(ulong x)
 {
        struct wil6210_priv *wil = (void *)x;
 
-       wil_dbg(wil, "Connect timeout\n");
+       wil_dbg_misc(wil, "Connect timeout\n");
 
        /* reschedule to thread context - disconnect won't
         * run from atomic context
@@ -107,9 +109,18 @@ static void wil_connect_timer_fn(ulong x)
        schedule_work(&wil->disconnect_worker);
 }
 
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+       /* make shadow copy of registers that should not change on run time */
+       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+                            sizeof(struct wil6210_mbox_ctl));
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
 int wil_priv_init(struct wil6210_priv *wil)
 {
-       wil_dbg(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "%s()\n", __func__);
 
        mutex_init(&wil->mutex);
        mutex_init(&wil->wmi_mutex);
@@ -136,11 +147,7 @@ int wil_priv_init(struct wil6210_priv *wil)
                return -EAGAIN;
        }
 
-       /* make shadow copy of registers that should not change on run time */
-       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
-                            sizeof(struct wil6210_mbox_ctl));
-       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
-       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+       wil_cache_mbox_regs(wil);
 
        return 0;
 }
@@ -162,7 +169,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
 
 static void wil_target_reset(struct wil6210_priv *wil)
 {
-       wil_dbg(wil, "Resetting...\n");
+       wil_dbg_misc(wil, "Resetting...\n");
 
        /* register write */
 #define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
@@ -202,7 +209,7 @@ static void wil_target_reset(struct wil6210_priv *wil)
 
        msleep(2000);
 
-       wil_dbg(wil, "Reset completed\n");
+       wil_dbg_misc(wil, "Reset completed\n");
 
 #undef W
 #undef S
@@ -225,8 +232,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
                wil_err(wil, "Firmware not ready\n");
                return -ETIME;
        } else {
-               wil_dbg(wil, "FW ready after %d ms\n",
-                       jiffies_to_msecs(to-left));
+               wil_dbg_misc(wil, "FW ready after %d ms\n",
+                            jiffies_to_msecs(to-left));
        }
        return 0;
 }
@@ -243,13 +250,13 @@ int wil_reset(struct wil6210_priv *wil)
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL);
 
+       wil6210_disable_irq(wil);
+       wil->status = 0;
+
        wmi_event_flush(wil);
 
-       flush_workqueue(wil->wmi_wq);
        flush_workqueue(wil->wmi_wq_conn);
-
-       wil6210_disable_irq(wil);
-       wil->status = 0;
+       flush_workqueue(wil->wmi_wq);
 
        /* TODO: put MAC in reset */
        wil_target_reset(wil);
@@ -258,11 +265,7 @@ int wil_reset(struct wil6210_priv *wil)
        wil->pending_connect_cid = -1;
        INIT_COMPLETION(wil->wmi_ready);
 
-       /* make shadow copy of registers that should not change on run time */
-       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
-                            sizeof(struct wil6210_mbox_ctl));
-       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
-       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+       wil_cache_mbox_regs(wil);
 
        /* TODO: release MAC reset */
        wil6210_enable_irq(wil);
@@ -278,7 +281,7 @@ void wil_link_on(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
 
-       wil_dbg(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "%s()\n", __func__);
 
        netif_carrier_on(ndev);
        netif_tx_wake_all_queues(ndev);
@@ -288,7 +291,7 @@ void wil_link_off(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
 
-       wil_dbg(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "%s()\n", __func__);
 
        netif_tx_stop_all_queues(ndev);
        netif_carrier_off(ndev);
@@ -311,27 +314,27 @@ static int __wil_up(struct wil6210_priv *wil)
        wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
-               wil_dbg(wil, "type: STATION\n");
+               wil_dbg_misc(wil, "type: STATION\n");
                bi = 0;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_AP:
-               wil_dbg(wil, "type: AP\n");
+               wil_dbg_misc(wil, "type: AP\n");
                bi = 100;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
-               wil_dbg(wil, "type: P2P_CLIENT\n");
+               wil_dbg_misc(wil, "type: P2P_CLIENT\n");
                bi = 0;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_P2P_GO:
-               wil_dbg(wil, "type: P2P_GO\n");
+               wil_dbg_misc(wil, "type: P2P_GO\n");
                bi = 100;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_MONITOR:
-               wil_dbg(wil, "type: Monitor\n");
+               wil_dbg_misc(wil, "type: Monitor\n");
                bi = 0;
                ndev->type = ARPHRD_IEEE80211_RADIOTAP;
                /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
@@ -354,7 +357,7 @@ static int __wil_up(struct wil6210_priv *wil)
                        wmi_set_channel(wil, channel->hw_value);
                break;
        default:
-               ;
+               break;
        }
 
        /* MAC address - pre-requisite for other commands */
index 3068b5c..8ce2e33 100644 (file)
@@ -35,37 +35,12 @@ static int wil_stop(struct net_device *ndev)
        return wil_down(wil);
 }
 
-/*
- * AC to queue mapping
- *
- * AC_VO -> queue 3
- * AC_VI -> queue 2
- * AC_BE -> queue 1
- * AC_BK -> queue 0
- */
-static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb)
-{
-       static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-       struct wil6210_priv *wil = ndev_to_wil(ndev);
-       u16 rc;
-
-       skb->priority = cfg80211_classify8021d(skb);
-
-       rc = wil_1d_to_queue[skb->priority];
-
-       wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority,
-                    (int)rc);
-
-       return rc;
-}
-
 static const struct net_device_ops wil_netdev_ops = {
        .ndo_open               = wil_open,
        .ndo_stop               = wil_stop,
        .ndo_start_xmit         = wil_start_xmit,
-       .ndo_select_queue       = wil_select_queue,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
 };
 
 void *wil_if_alloc(struct device *dev, void __iomem *csr)
@@ -97,7 +72,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
        ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
        cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
 
-       ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1);
+       ndev = alloc_netdev(0, "wlan%d", ether_setup);
        if (!ndev) {
                dev_err(dev, "alloc_netdev_mqs failed\n");
                rc = -ENOMEM;
index 0fc83ed..81c35c6 100644 (file)
@@ -53,7 +53,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        }
        wil->n_msi = use_msi;
        if (wil->n_msi) {
-               wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi);
+               wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
                rc = pci_enable_msi_block(pdev, wil->n_msi);
                if (rc && (wil->n_msi == 3)) {
                        wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
@@ -65,7 +65,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
                        wil->n_msi = 0;
                }
        } else {
-               wil_dbg(wil, "MSI interrupts disabled, use INTx\n");
+               wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
        }
 
        rc = wil6210_init_irq(wil, pdev->irq);
index f29c294..d1315b4 100644 (file)
@@ -74,8 +74,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
        vring->swtail = 0;
        vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
        if (!vring->ctx) {
-               wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n",
-                       vring->size);
                vring->va = NULL;
                return -ENOMEM;
        }
@@ -100,8 +98,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
                d->dma.status = TX_DMA_STATUS_DU;
        }
 
-       wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
-               vring->va, (unsigned long long)vring->pa, vring->ctx);
+       wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+                    vring->va, (unsigned long long)vring->pa, vring->ctx);
 
        return 0;
 }
@@ -353,8 +351,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
                wil_rx_add_radiotap_header(wil, skb, d);
 
-       wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
-       wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4,
+       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+       wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
        wil_vring_advance_head(vring, 1);
@@ -369,7 +367,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
         */
        ftype = wil_rxdesc_ftype(d) << 2;
        if (ftype != IEEE80211_FTYPE_DATA) {
-               wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype);
+               wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
                /* TODO: process it */
                kfree_skb(skb);
                return NULL;
@@ -430,6 +428,8 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
        int rc;
        unsigned int len = skb->len;
 
+       skb_orphan(skb);
+
        if (in_interrupt())
                rc = netif_rx(skb);
        else
@@ -459,13 +459,11 @@ void wil_rx_handle(struct wil6210_priv *wil)
                wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
                return;
        }
-       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "%s()\n", __func__);
        while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
-               wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+               wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
                                  skb->data, skb_headlen(skb), false);
 
-               skb_orphan(skb);
-
                if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
                        skb->dev = ndev;
                        skb_reset_mac_header(skb);
@@ -484,53 +482,18 @@ void wil_rx_handle(struct wil6210_priv *wil)
 
 int wil_rx_init(struct wil6210_priv *wil)
 {
-       struct net_device *ndev = wil_to_ndev(wil);
-       struct wireless_dev *wdev = wil->wdev;
        struct vring *vring = &wil->vring_rx;
        int rc;
-       struct wmi_cfg_rx_chain_cmd cmd = {
-               .action = WMI_RX_CHAIN_ADD,
-               .rx_sw_ring = {
-                       .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
-               },
-               .mid = 0, /* TODO - what is it? */
-               .decap_trans_type = WMI_DECAP_TYPE_802_3,
-       };
-       struct {
-               struct wil6210_mbox_hdr_wmi wmi;
-               struct wmi_cfg_rx_chain_done_event evt;
-       } __packed evt;
 
        vring->size = WIL6210_RX_RING_SIZE;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                return rc;
 
-       cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
-       cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size);
-       if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
-               struct ieee80211_channel *ch = wdev->preset_chandef.chan;
-
-               cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
-               if (ch)
-                       cmd.sniffer_cfg.channel = ch->hw_value - 1;
-               cmd.sniffer_cfg.phy_info_mode =
-                       cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
-               cmd.sniffer_cfg.phy_support =
-                       cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
-                                   ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
-       }
-       /* typical time for secure PCP is 840ms */
-       rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
-                     WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+       rc = wmi_rx_chain_add(wil, vring);
        if (rc)
                goto err_free;
 
-       vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
-
-       wil_dbg(wil, "Rx init: status %d tail 0x%08x\n",
-               le32_to_cpu(evt.evt.status), vring->hwtail);
-
        rc = wil_rx_refill(wil, vring->size);
        if (rc)
                goto err_free;
@@ -546,25 +509,8 @@ void wil_rx_fini(struct wil6210_priv *wil)
 {
        struct vring *vring = &wil->vring_rx;
 
-       if (vring->va) {
-               int rc;
-               struct wmi_cfg_rx_chain_cmd cmd = {
-                       .action = cpu_to_le32(WMI_RX_CHAIN_DEL),
-                       .rx_sw_ring = {
-                               .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
-                       },
-               };
-               struct {
-                       struct wil6210_mbox_hdr_wmi wmi;
-                       struct wmi_cfg_rx_chain_done_event cfg;
-               } __packed wmi_rx_cfg_reply;
-
-               rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
-                             WMI_CFG_RX_CHAIN_DONE_EVENTID,
-                             &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply),
-                             100);
+       if (vring->va)
                wil_vring_free(wil, vring, 0);
-       }
 }
 
 int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
@@ -617,6 +563,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
        if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
                wil_err(wil, "Tx config failed, status 0x%02x\n",
                        reply.cmd.status);
+               rc = -EINVAL;
                goto out_free;
        }
        vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
@@ -689,7 +636,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        uint i = swhead;
        dma_addr_t pa;
 
-       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "%s()\n", __func__);
 
        if (avail < vring->size/8)
                netif_tx_stop_all_queues(wil_to_ndev(wil));
@@ -706,9 +653,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        pa = dma_map_single(dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
 
-       wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+       wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
                     skb->data, (unsigned long long)pa);
-       wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+       wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);
 
        if (unlikely(dma_mapping_error(dev, pa)))
@@ -737,12 +684,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
        d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
 
-       wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4,
+       wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
        /* advance swhead */
        wil_vring_advance_head(vring, nr_frags + 1);
-       wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+       wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
        iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
        /* hold reference to skb
         * to prevent skb release before accounting
@@ -775,7 +722,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct vring *vring;
        int rc;
 
-       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "%s()\n", __func__);
        if (!test_bit(wil_status_fwready, &wil->status)) {
                wil_err(wil, "FW not ready\n");
                goto drop;
@@ -802,15 +749,13 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        switch (rc) {
        case 0:
-               ndev->stats.tx_packets++;
-               ndev->stats.tx_bytes += skb->len;
+               /* statistics will be updated on the tx_complete */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        case -ENOMEM:
                return NETDEV_TX_BUSY;
        default:
-               ; /* goto drop; */
-               break;
+               break; /* goto drop; */
        }
  drop:
        netif_tx_stop_all_queues(ndev);
@@ -827,6 +772,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  */
 void wil_tx_complete(struct wil6210_priv *wil, int ringid)
 {
+       struct net_device *ndev = wil_to_ndev(wil);
        struct device *dev = wil_to_dev(wil);
        struct vring *vring = &wil->vring_tx[ringid];
 
@@ -835,7 +781,7 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
                return;
        }
 
-       wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid);
+       wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
 
        while (!wil_vring_is_empty(vring)) {
                volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
@@ -844,16 +790,23 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
                if (!(d->dma.status & TX_DMA_STATUS_DU))
                        break;
 
-               wil_dbg_TXRX(wil,
+               wil_dbg_txrx(wil,
                             "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
                             vring->swtail, d->dma.length, d->dma.status,
                             d->dma.error);
-               wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4,
+               wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
                                  (const void *)d, sizeof(*d), false);
 
                pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
                skb = vring->ctx[vring->swtail];
                if (skb) {
+                       if (d->dma.error == 0) {
+                               ndev->stats.tx_packets++;
+                               ndev->stats.tx_bytes += skb->len;
+                       } else {
+                               ndev->stats.tx_errors++;
+                       }
+
                        dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        vring->ctx[vring->swtail] = NULL;
index 9bcfffa..aea961f 100644 (file)
@@ -36,8 +36,6 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 
 #define WIL6210_MEM_SIZE (2*1024*1024UL)
 
-#define WIL6210_TX_QUEUES (4)
-
 #define WIL6210_RX_RING_SIZE (128)
 #define WIL6210_TX_RING_SIZE (128)
 #define WIL6210_MAX_TX_RINGS (24)
@@ -101,8 +99,7 @@ struct RGF_ICR {
 #define RGF_DMA_EP_MISC_ICR            (0x881bec) /* struct RGF_ICR */
        #define BIT_DMA_EP_MISC_ICR_RX_HTRSH    BIT(0)
        #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT   BIT(1)
-       #define BIT_DMA_EP_MISC_ICR_FW_INT0     BIT(28)
-       #define BIT_DMA_EP_MISC_ICR_FW_INT1     BIT(29)
+       #define BIT_DMA_EP_MISC_ICR_FW_INT(n)   BIT(28+n) /* n = [0..3] */
 
 /* Interrupt moderation control */
 #define RGF_DMA_ITR_CNT_TRSH           (0x881c5c)
@@ -121,8 +118,9 @@ struct RGF_ICR {
 #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
 
 /* ISR register bits */
-#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0
-#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1
+#define ISR_MISC_FW_READY      BIT_DMA_EP_MISC_ICR_FW_INT(0)
+#define ISR_MISC_MBOX_EVT      BIT_DMA_EP_MISC_ICR_FW_INT(1)
+#define ISR_MISC_FW_ERROR      BIT_DMA_EP_MISC_ICR_FW_INT(3)
 
 /* Hardware definitions end */
 
@@ -272,17 +270,18 @@ struct wil6210_priv {
 #define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
 #define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
 
-#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
-#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
-#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
 
-#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize,    \
+#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,    \
                          groupsize, buf, len, ascii)           \
                          wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
                                         prefix_type, rowsize,  \
                                         groupsize, buf, len, ascii)
 
-#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize,     \
+#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize,     \
                         groupsize, buf, len, ascii)            \
                         wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
                                        prefix_type, rowsize,   \
@@ -328,6 +327,7 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
                       const void *mac_addr, int key_len, const void *key);
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
 
 int wil6210_init_irq(struct wil6210_priv *wil, int irq);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
index 12915f6..0bb3b76 100644 (file)
 #include <linux/io.h>
 #include <linux/list.h>
 #include <linux/etherdevice.h>
+#include <linux/if_arp.h>
 
 #include "wil6210.h"
+#include "txrx.h"
 #include "wmi.h"
 
 /**
@@ -186,7 +188,6 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
                wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
                        (int)(sizeof(cmd) + len), r->entry_size);
                return -ERANGE;
-
        }
 
        might_sleep();
@@ -213,7 +214,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        }
        /* next head */
        next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
-       wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+       wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
        /* wait till FW finish with previous command */
        for (retry = 5; retry > 0; retry--) {
                r->tail = ioread32(wil->csr + HOST_MBOX +
@@ -234,10 +235,10 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        }
        cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
        /* set command */
-       wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
-       wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+       wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+       wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
                         sizeof(cmd), true);
-       wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+       wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
                         len, true);
        wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
        wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
@@ -273,7 +274,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
        struct wmi_ready_event *evt = d;
        u32 ver = le32_to_cpu(evt->sw_version);
 
-       wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+       wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
 
        if (!is_valid_ether_addr(ndev->dev_addr)) {
                memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -286,7 +287,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
                             int len)
 {
-       wil_dbg_WMI(wil, "WMI: FW ready\n");
+       wil_dbg_wmi(wil, "WMI: FW ready\n");
 
        set_bit(wil_status_fwready, &wil->status);
        /* reuse wmi_ready for the firmware ready indication */
@@ -309,11 +310,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        u32 d_len = le32_to_cpu(data->info.len);
        u16 d_status = le16_to_cpu(data->info.status);
 
-       wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n",
+       wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
                    data->info.channel, data->info.mcs, data->info.snr);
-       wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+       wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
                    le16_to_cpu(data->info.stype));
-       wil_dbg_WMI(wil, "qid %d mid %d cid %d\n",
+       wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
                    data->info.qid, data->info.mid, data->info.cid);
 
        if (!channel) {
@@ -329,15 +330,15 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
                const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
                size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
                                                 u.beacon.variable);
-               wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap);
+               wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
 
                bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
                                          tsf, cap, bi, ie_buf, ie_len,
                                          signal, GFP_KERNEL);
                if (bss) {
-                       wil_dbg_WMI(wil, "Added BSS %pM\n",
+                       wil_dbg_wmi(wil, "Added BSS %pM\n",
                                    rx_mgmt_frame->bssid);
-                       cfg80211_put_bss(bss);
+                       cfg80211_put_bss(wiphy, bss);
                } else {
                        wil_err(wil, "cfg80211_inform_bss() failed\n");
                }
@@ -351,7 +352,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                struct wmi_scan_complete_event *data = d;
                bool aborted = (data->status != 0);
 
-               wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+               wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
                cfg80211_scan_done(wil->scan_request, aborted);
                wil->scan_request = NULL;
        } else {
@@ -386,9 +387,9 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
                return;
        }
        ch = evt->channel + 1;
-       wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n",
+       wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
                    evt->bssid, ch, evt->cid);
-       wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+       wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
                         evt->assoc_info, len - sizeof(*evt), true);
 
        /* figure out IE's */
@@ -450,14 +451,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
 {
        struct wmi_disconnect_event *evt = d;
 
-       wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n",
+       wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
                    evt->bssid,
                    evt->protocol_reason_status, evt->disconnect_reason);
 
        wil->sinfo_gen++;
 
        wil6210_disconnect(wil, evt->bssid);
-       clear_bit(wil_status_dontscan, &wil->status);
 }
 
 static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@@ -476,7 +476,7 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
        wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
        wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
        wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
-       wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n"
+       wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
                    "BF status 0x%08x SNR 0x%08x\n"
                    "Tx Tpt %d goodput %d Rx goodput %d\n"
                    "Sectors(rx:tx) my %d:%d peer %d:%d\n",
@@ -501,7 +501,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
        struct sk_buff *skb;
        struct ethhdr *eth;
 
-       wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len,
+       wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
                    evt->src_mac);
 
        if (eapol_len > 196) { /* TODO: revisit size limit */
@@ -587,11 +587,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
                                             event.wmi) + len, 4),
                              GFP_KERNEL);
-               if (!evt) {
-                       wil_err(wil, "kmalloc for WMI event (%d) failed\n",
-                               len);
+               if (!evt)
                        return;
-               }
+
                evt->event.hdr = hdr;
                cmd = (void *)&evt->event.wmi;
                wil_memcpy_fromio_32(cmd, src, len);
@@ -599,15 +597,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                iowrite32(0, wil->csr + HOSTADDR(r->tail) +
                          offsetof(struct wil6210_mbox_ring_desc, sync));
                /* indicate */
-               wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n",
+               wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
                            le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
                            hdr.flags);
                if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
                    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-                       wil_dbg_WMI(wil, "WMI event 0x%04x\n",
+                       wil_dbg_wmi(wil, "WMI event 0x%04x\n",
                                    evt->event.wmi.id);
                }
-               wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+               wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
                                 &evt->event.hdr, sizeof(hdr) + len, true);
 
                /* advance tail */
@@ -623,7 +621,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                {
                        int q = queue_work(wil->wmi_wq,
                                           &wil->wmi_event_worker);
-                       wil_dbg_WMI(wil, "queue_work -> %d\n", q);
+                       wil_dbg_wmi(wil, "queue_work -> %d\n", q);
                }
        }
 }
@@ -650,7 +648,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
                        cmdid, reply_id, to_msec);
                rc = -ETIME;
        } else {
-               wil_dbg_WMI(wil,
+               wil_dbg_wmi(wil,
                            "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
                            cmdid, reply_id,
                            to_msec - jiffies_to_msecs(remain));
@@ -680,7 +678,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
 
        memcpy(cmd.mac, addr, ETH_ALEN);
 
-       wil_dbg_WMI(wil, "Set MAC %pM\n", addr);
+       wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
 
        return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
 }
@@ -778,7 +776,7 @@ int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
 
        skb_set_mac_header(skb, 0);
        eth = eth_hdr(skb);
-       wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+       wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
        for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
                if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
                        goto found_dest;
@@ -838,10 +836,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
        int rc;
        u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
        struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
-       if (!cmd) {
-               wil_err(wil, "kmalloc(%d) failed\n", len);
+       if (!cmd)
                return -ENOMEM;
-       }
 
        cmd->mgmt_frm_type = type;
        /* BUG: FW API define ieLen as u8. Will fix FW */
@@ -853,11 +849,60 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
        return rc;
 }
 
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+{
+       struct wireless_dev *wdev = wil->wdev;
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wmi_cfg_rx_chain_cmd cmd = {
+               .action = WMI_RX_CHAIN_ADD,
+               .rx_sw_ring = {
+                       .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+                       .ring_mem_base = cpu_to_le64(vring->pa),
+                       .ring_size = cpu_to_le16(vring->size),
+               },
+               .mid = 0, /* TODO - what is it? */
+               .decap_trans_type = WMI_DECAP_TYPE_802_3,
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cfg_rx_chain_done_event evt;
+       } __packed evt;
+       int rc;
+
+       if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+               struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+               cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+               if (ch)
+                       cmd.sniffer_cfg.channel = ch->hw_value - 1;
+               cmd.sniffer_cfg.phy_info_mode =
+                       cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+               cmd.sniffer_cfg.phy_support =
+                       cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+                                   ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+       }
+       /* typical time for secure PCP is 840ms */
+       rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+                     WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+       if (rc)
+               return rc;
+
+       vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+       wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
+                    le32_to_cpu(evt.evt.status), vring->hwtail);
+
+       if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
+               rc = -EINVAL;
+
+       return rc;
+}
+
 void wmi_event_flush(struct wil6210_priv *wil)
 {
        struct pending_wmi_event *evt, *t;
 
-       wil_dbg_WMI(wil, "%s()\n", __func__);
+       wil_dbg_wmi(wil, "%s()\n", __func__);
 
        list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
                list_del(&evt->list);
@@ -899,7 +944,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
                                wmi_evt_call_handler(wil, id, evt_data,
                                                     len - sizeof(*wmi));
                        }
-                       wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id);
+                       wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
                        complete(&wil->wmi_ready);
                        return;
                }
@@ -964,7 +1009,7 @@ void wmi_connect_worker(struct work_struct *work)
                return;
        }
 
-       wil_dbg_WMI(wil, "Configure for connection CID %d\n",
+       wil_dbg_wmi(wil, "Configure for connection CID %d\n",
                    wil->pending_connect_cid);
 
        rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
index 1a6661a..756e19f 100644 (file)
@@ -26,6 +26,7 @@ brcmfmac-objs += \
                wl_cfg80211.o \
                fwil.o \
                fweh.o \
+               p2p.o \
                dhd_cdc.o \
                dhd_common.o \
                dhd_linux.o
@@ -37,4 +38,4 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
 brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
                usb.o
 brcmfmac-$(CONFIG_BRCMDBG) += \
-               dhd_dbg.o
\ No newline at end of file
+               dhd_dbg.o
index a2f32fb..ef6f23b 100644 (file)
@@ -72,6 +72,7 @@
 #define BRCMF_C_SET_WSEC                       134
 #define BRCMF_C_GET_PHY_NOISE                  135
 #define BRCMF_C_GET_BSS_INFO                   136
+#define BRCMF_C_SET_SCB_TIMEOUT                        158
 #define BRCMF_C_GET_PHYLIST                    180
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME          185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME          187
 #define BRCMF_E_REASON_MINTXRATE               9
 #define BRCMF_E_REASON_TXFAIL                  10
 
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS         4
 #define BRCMF_E_REASON_FAST_ROAM_FAILED                5
 #define BRCMF_E_REASON_DIRECTED_ROAM           6
 #define BRCMF_E_REASON_TSPEC_REJECTED          7
@@ -375,6 +377,28 @@ struct brcmf_join_params {
        struct brcmf_assoc_params_le params_le;
 };
 
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+       u8 scan_type;           /* 0 use default, active or passive scan */
+       __le32 nprobes;         /* -1 use default, nr of probes per channel */
+       __le32 active_time;     /* -1 use default, dwell time per channel for
+                                * active scanning
+                                */
+       __le32 passive_time;    /* -1 use default, dwell time per channel
+                                * for passive scanning
+                                */
+       __le32 home_time;       /* -1 use default, dwell time for the home
+                                * channel between channel scans
+                                */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+       struct brcmf_ssid_le ssid_le;   /* {0, ""}: wildcard scan */
+       struct brcmf_join_scan_params_le scan_le;
+       struct brcmf_assoc_params_le assoc_le;
+};
+
 struct brcmf_wsec_key {
        u32 index;              /* key index */
        u32 len;                /* key length */
@@ -451,6 +475,19 @@ struct brcmf_sta_info_le {
        __le32  rx_decrypt_failures;    /* # of packet decrypted failed */
 };
 
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+       __be16  version;
+       __be16  chanspec;
+       __be32  rssi;
+       __be32  mactime;
+       __be32  rate;
+};
+
 /* Bus independent dongle command */
 struct brcmf_dcmd {
        uint cmd;               /* common dongle cmd definition */
@@ -489,9 +526,6 @@ struct brcmf_pub {
        struct mutex proto_block;
        unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
 
-       atomic_t pend_8021x_cnt;
-       wait_queue_head_t pend_8021x_wait;
-
        struct brcmf_fweh_info fweh;
 #ifdef DEBUG
        struct dentry *dbgfs_dir;
@@ -515,9 +549,11 @@ struct brcmf_cfg80211_vif;
  * @vif: points to cfg80211 specific interface information.
  * @ndev: associated network device.
  * @stats: interface specific network statistics.
- * @idx: interface index in device firmware.
+ * @ifidx: interface index in device firmware.
  * @bssidx: index of bss associated with this interface.
  * @mac_addr: assigned mac address.
+ * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
+ * @pend_8021x_wait: used for signalling change in count.
  */
 struct brcmf_if {
        struct brcmf_pub *drvr;
@@ -526,9 +562,11 @@ struct brcmf_if {
        struct net_device_stats stats;
        struct work_struct setmacaddr_work;
        struct work_struct multicast_work;
-       int idx;
+       int ifidx;
        s32 bssidx;
        u8 mac_addr[ETH_ALEN];
+       atomic_t pend_8021x_cnt;
+       wait_queue_head_t pend_8021x_wait;
 };
 
 
@@ -547,9 +585,10 @@ extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
 extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
                               struct sk_buff *rxp);
 
-extern int brcmf_net_attach(struct brcmf_if *ifp);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
-                                    s32 bssidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
+extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
+                                    s32 ifidx, char *name, u8 *mac_addr);
+extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
 
 #endif                         /* _BRCMF_H_ */
index 64c38f4..ad25c34 100644 (file)
@@ -24,18 +24,6 @@ enum brcmf_bus_state {
        BRCMF_BUS_DATA          /* Ready for frame transfers */
 };
 
-struct dngl_stats {
-       unsigned long rx_packets;       /* total packets received */
-       unsigned long tx_packets;       /* total packets transmitted */
-       unsigned long rx_bytes; /* total bytes received */
-       unsigned long tx_bytes; /* total bytes transmitted */
-       unsigned long rx_errors;        /* bad packets received */
-       unsigned long tx_errors;        /* packet transmit problems */
-       unsigned long rx_dropped;       /* packets dropped by dongle */
-       unsigned long tx_dropped;       /* packets dropped by dongle */
-       unsigned long multicast;        /* multicast packets received */
-};
-
 struct brcmf_bus_dcmd {
        char *name;
        char *param;
@@ -72,11 +60,12 @@ struct brcmf_bus_ops {
  * @drvr: public driver information.
  * @state: operational state of the bus interface.
  * @maxctl: maximum size for rxctl request message.
- * @drvr_up: indicates driver up/down status.
  * @tx_realloc: number of tx packets realloced for headroom.
  * @dstats: dongle-based statistical data.
  * @align: alignment requirement for the bus.
  * @dcmd_list: bus/device specific dongle initialization commands.
+ * @chip: device identifier of the dongle chip.
+ * @chiprev: revision of the dongle chip.
  */
 struct brcmf_bus {
        union {
@@ -87,10 +76,10 @@ struct brcmf_bus {
        struct brcmf_pub *drvr;
        enum brcmf_bus_state state;
        uint maxctl;
-       bool drvr_up;
        unsigned long tx_realloc;
-       struct dngl_stats dstats;
        u8 align;
+       u32 chip;
+       u32 chiprev;
        struct list_head dcmd_list;
 
        struct brcmf_bus_ops *ops;
index bb454cd..a2354d9 100644 (file)
@@ -303,6 +303,14 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
                brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
                return -EBADE;
        }
+       /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+        * events this is easy because it contains the bssidx which maps
+        * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
+        * bssidx 1 is used for p2p0 and no data can be received or
+        * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
+        */
+       if (*ifidx)
+               (*ifidx)++;
 
        if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
            BDC_PROTO_VER) {
index 14b8fdd..c06cea8 100644 (file)
@@ -26,6 +26,8 @@
 #include "dhd_bus.h"
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
 #include "wl_cfg80211.h"
 #include "fwil.h"
 
@@ -40,6 +42,12 @@ MODULE_LICENSE("Dual BSD/GPL");
 int brcmf_msg_level;
 module_param(brcmf_msg_level, int, 0);
 
+/* P2P0 enable */
+static int brcmf_p2p_enable;
+#ifdef CONFIG_BRCMDBG
+module_param_named(p2pon, brcmf_p2p_enable, int, 0);
+MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
+#endif
 
 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
 {
@@ -70,9 +78,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
        u32 buflen;
        s32 err;
 
-       brcmf_dbg(TRACE, "enter\n");
-
        ifp = container_of(work, struct brcmf_if, multicast_work);
+
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
        ndev = ifp->ndev;
 
        /* Determine initial value of allmulti flag */
@@ -129,9 +138,10 @@ _brcmf_set_mac_address(struct work_struct *work)
        struct brcmf_if *ifp;
        s32 err;
 
-       brcmf_dbg(TRACE, "enter\n");
-
        ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
        err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
                                       ETH_ALEN);
        if (err < 0) {
@@ -168,7 +178,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
        struct brcmf_pub *drvr = ifp->drvr;
        struct ethhdr *eh;
 
-       brcmf_dbg(TRACE, "Enter\n");
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
 
        /* Can the device send data? */
        if (drvr->bus_if->state != BRCMF_BUS_DATA) {
@@ -179,8 +189,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
                goto done;
        }
 
-       if (!drvr->iflist[ifp->idx]) {
-               brcmf_err("bad ifidx %d\n", ifp->idx);
+       if (!drvr->iflist[ifp->bssidx]) {
+               brcmf_err("bad ifidx %d\n", ifp->bssidx);
                netif_stop_queue(ndev);
                dev_kfree_skb(skb);
                ret = -ENODEV;
@@ -192,14 +202,14 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
                struct sk_buff *skb2;
 
                brcmf_dbg(INFO, "%s: insufficient headroom\n",
-                         brcmf_ifname(drvr, ifp->idx));
+                         brcmf_ifname(drvr, ifp->bssidx));
                drvr->bus_if->tx_realloc++;
                skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
                dev_kfree_skb(skb);
                skb = skb2;
                if (skb == NULL) {
                        brcmf_err("%s: skb_realloc_headroom failed\n",
-                                 brcmf_ifname(drvr, ifp->idx));
+                                 brcmf_ifname(drvr, ifp->bssidx));
                        ret = -ENOMEM;
                        goto done;
                }
@@ -217,19 +227,21 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
        if (is_multicast_ether_addr(eh->h_dest))
                drvr->tx_multicast++;
        if (ntohs(eh->h_proto) == ETH_P_PAE)
-               atomic_inc(&drvr->pend_8021x_cnt);
+               atomic_inc(&ifp->pend_8021x_cnt);
 
        /* If the protocol uses a data header, apply it */
-       brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+       brcmf_proto_hdrpush(drvr, ifp->ifidx, skb);
 
        /* Use bus module to send data frame */
        ret =  brcmf_bus_txdata(drvr->bus_if, skb);
 
 done:
-       if (ret)
-               drvr->bus_if->dstats.tx_dropped++;
-       else
-               drvr->bus_if->dstats.tx_packets++;
+       if (ret) {
+               ifp->stats.tx_dropped++;
+       } else {
+               ifp->stats.tx_packets++;
+               ifp->stats.tx_bytes += skb->len;
+       }
 
        /* Return ok: we always eat the packet */
        return NETDEV_TX_OK;
@@ -270,12 +282,13 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
        skb_queue_walk_safe(skb_list, skb, pnext) {
                skb_unlink(skb, skb_list);
 
-               /* process and remove protocol-specific header
-                */
+               /* process and remove protocol-specific header */
                ret = brcmf_proto_hdrpull(drvr, &ifidx, skb);
-               if (ret < 0) {
-                       if (ret != -ENODATA)
-                               bus_if->dstats.rx_errors++;
+               ifp = drvr->iflist[ifidx];
+
+               if (ret || !ifp || !ifp->ndev) {
+                       if ((ret != -ENODATA) && ifp)
+                               ifp->stats.rx_errors++;
                        brcmu_pkt_buf_free_skb(skb);
                        continue;
                }
@@ -295,21 +308,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
                eth = skb->data;
                len = skb->len;
 
-               ifp = drvr->iflist[ifidx];
-               if (ifp == NULL)
-                       ifp = drvr->iflist[0];
-
-               if (!ifp || !ifp->ndev ||
-                   ifp->ndev->reg_state != NETREG_REGISTERED) {
-                       brcmu_pkt_buf_free_skb(skb);
-                       continue;
-               }
-
                skb->dev = ifp->ndev;
                skb->protocol = eth_type_trans(skb, skb->dev);
 
                if (skb->pkt_type == PACKET_MULTICAST)
-                       bus_if->dstats.multicast++;
+                       ifp->stats.multicast++;
 
                skb->data = eth;
                skb->len = len;
@@ -325,8 +328,13 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
                        ifp->ndev->last_rx = jiffies;
                }
 
-               bus_if->dstats.rx_bytes += skb->len;
-               bus_if->dstats.rx_packets++;    /* Local count */
+               if (!(ifp->ndev->flags & IFF_UP)) {
+                       brcmu_pkt_buf_free_skb(skb);
+                       continue;
+               }
+
+               ifp->stats.rx_bytes += skb->len;
+               ifp->stats.rx_packets++;
 
                if (in_interrupt())
                        netif_rx(skb);
@@ -348,36 +356,31 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
        u16 type;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
+       struct brcmf_if *ifp;
 
        brcmf_proto_hdrpull(drvr, &ifidx, txp);
 
+       ifp = drvr->iflist[ifidx];
+       if (!ifp)
+               return;
+
        eh = (struct ethhdr *)(txp->data);
        type = ntohs(eh->h_proto);
 
        if (type == ETH_P_PAE) {
-               atomic_dec(&drvr->pend_8021x_cnt);
-               if (waitqueue_active(&drvr->pend_8021x_wait))
-                       wake_up(&drvr->pend_8021x_wait);
+               atomic_dec(&ifp->pend_8021x_cnt);
+               if (waitqueue_active(&ifp->pend_8021x_wait))
+                       wake_up(&ifp->pend_8021x_wait);
        }
+       if (!success)
+               ifp->stats.tx_errors++;
 }
 
 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
-       struct brcmf_bus *bus_if = ifp->drvr->bus_if;
-
-       brcmf_dbg(TRACE, "Enter\n");
 
-       /* Copy dongle stats to net device stats */
-       ifp->stats.rx_packets = bus_if->dstats.rx_packets;
-       ifp->stats.tx_packets = bus_if->dstats.tx_packets;
-       ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
-       ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
-       ifp->stats.rx_errors = bus_if->dstats.rx_errors;
-       ifp->stats.tx_errors = bus_if->dstats.tx_errors;
-       ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
-       ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
-       ifp->stats.multicast = bus_if->dstats.multicast;
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
 
        return &ifp->stats;
 }
@@ -431,7 +434,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
        u32 toe_cmpnt, csum_dir;
        int ret;
 
-       brcmf_dbg(TRACE, "Enter\n");
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
 
        /* all ethtool calls start with a cmd word */
        if (copy_from_user(&cmd, uaddr, sizeof(u32)))
@@ -454,13 +457,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
                        sprintf(info.driver, "dhd");
                        strcpy(info.version, BRCMF_VERSION_STR);
                }
-
-               /* otherwise, require dongle to be up */
-               else if (!drvr->bus_if->drvr_up) {
-                       brcmf_err("dongle is not up\n");
-                       return -ENODEV;
-               }
-               /* finally, report dongle driver type */
+               /* report dongle driver type */
                else
                        sprintf(info.driver, "wl");
 
@@ -534,9 +531,9 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
 
-       brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
+       brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
 
-       if (!drvr->iflist[ifp->idx])
+       if (!drvr->iflist[ifp->bssidx])
                return -1;
 
        if (cmd == SIOCETHTOOL)
@@ -548,17 +545,12 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
-       struct brcmf_pub *drvr = ifp->drvr;
-
-       brcmf_dbg(TRACE, "Enter\n");
 
-       if (drvr->bus_if->drvr_up == 0)
-               return 0;
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
 
        brcmf_cfg80211_down(ndev);
 
        /* Set state and stop OS transmissions */
-       drvr->bus_if->drvr_up = false;
        netif_stop_queue(ndev);
 
        return 0;
@@ -572,7 +564,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
        u32 toe_ol;
        s32 ret = 0;
 
-       brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
 
        /* If bus is not ready, can't continue */
        if (bus_if->state != BRCMF_BUS_DATA) {
@@ -580,9 +572,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
                return -EAGAIN;
        }
 
-       atomic_set(&drvr->pend_8021x_cnt, 0);
-
-       memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
+       atomic_set(&ifp->pend_8021x_cnt, 0);
 
        /* Get current TOE mode from dongle */
        if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
@@ -593,7 +583,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
 
        /* Allow transmit calls */
        netif_start_queue(ndev);
-       drvr->bus_if->drvr_up = true;
        if (brcmf_cfg80211_up(ndev)) {
                brcmf_err("failed to bring up cfg80211\n");
                return -1;
@@ -612,29 +601,18 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
        .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
 };
 
-static const struct net_device_ops brcmf_netdev_ops_virt = {
-       .ndo_open = brcmf_cfg80211_up,
-       .ndo_stop = brcmf_cfg80211_down,
-       .ndo_get_stats = brcmf_netdev_get_stats,
-       .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
-       .ndo_start_xmit = brcmf_netdev_start_xmit,
-       .ndo_set_mac_address = brcmf_netdev_set_mac_address,
-       .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
-int brcmf_net_attach(struct brcmf_if *ifp)
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
 {
        struct brcmf_pub *drvr = ifp->drvr;
        struct net_device *ndev;
+       s32 err;
 
-       brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
+       brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+                 ifp->mac_addr);
        ndev = ifp->ndev;
 
        /* set appropriate operations */
-       if (!ifp->idx)
-               ndev->netdev_ops = &brcmf_netdev_ops_pri;
-       else
-               ndev->netdev_ops = &brcmf_netdev_ops_virt;
+       ndev->netdev_ops = &brcmf_netdev_ops_pri;
 
        ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
        ndev->ethtool_ops = &brcmf_ethtool_ops;
@@ -645,7 +623,14 @@ int brcmf_net_attach(struct brcmf_if *ifp)
        /* set the mac address */
        memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
 
-       if (register_netdev(ndev) != 0) {
+       INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+       INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
+
+       if (rtnl_locked)
+               err = register_netdevice(ndev);
+       else
+               err = register_netdev(ndev);
+       if (err != 0) {
                brcmf_err("couldn't register the net device\n");
                goto fail;
        }
@@ -659,16 +644,78 @@ fail:
        return -EBADE;
 }
 
-struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
-                             char *name, u8 *addr_mask)
+static int brcmf_net_p2p_open(struct net_device *ndev)
+{
+       brcmf_dbg(TRACE, "Enter\n");
+
+       return brcmf_cfg80211_up(ndev);
+}
+
+static int brcmf_net_p2p_stop(struct net_device *ndev)
+{
+       brcmf_dbg(TRACE, "Enter\n");
+
+       return brcmf_cfg80211_down(ndev);
+}
+
+static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
+                                 struct ifreq *ifr, int cmd)
+{
+       brcmf_dbg(TRACE, "Enter\n");
+       return 0;
+}
+
+static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
+                                           struct net_device *ndev)
+{
+       if (skb)
+               dev_kfree_skb_any(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_p2p = {
+       .ndo_open = brcmf_net_p2p_open,
+       .ndo_stop = brcmf_net_p2p_stop,
+       .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
+       .ndo_start_xmit = brcmf_net_p2p_start_xmit
+};
+
+static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
+{
+       struct net_device *ndev;
+
+       brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+                 ifp->mac_addr);
+       ndev = ifp->ndev;
+
+       ndev->netdev_ops = &brcmf_netdev_ops_p2p;
+
+       /* set the mac address */
+       memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+
+       if (register_netdev(ndev) != 0) {
+               brcmf_err("couldn't register the p2p net device\n");
+               goto fail;
+       }
+
+       brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+       return 0;
+
+fail:
+       return -EBADE;
+}
+
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+                             char *name, u8 *mac_addr)
 {
        struct brcmf_if *ifp;
        struct net_device *ndev;
-       int i;
 
-       brcmf_dbg(TRACE, "idx %d\n", ifidx);
+       brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
 
-       ifp = drvr->iflist[ifidx];
+       ifp = drvr->iflist[bssidx];
        /*
         * Delete the existing interface before overwriting it
         * in case we missed the BRCMF_E_IF_DEL event.
@@ -680,7 +727,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
                        netif_stop_queue(ifp->ndev);
                        unregister_netdev(ifp->ndev);
                        free_netdev(ifp->ndev);
-                       drvr->iflist[ifidx] = NULL;
+                       drvr->iflist[bssidx] = NULL;
                } else {
                        brcmf_err("ignore IF event\n");
                        return ERR_PTR(-EINVAL);
@@ -697,16 +744,15 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
        ifp = netdev_priv(ndev);
        ifp->ndev = ndev;
        ifp->drvr = drvr;
-       drvr->iflist[ifidx] = ifp;
-       ifp->idx = ifidx;
+       drvr->iflist[bssidx] = ifp;
+       ifp->ifidx = ifidx;
        ifp->bssidx = bssidx;
 
-       INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
-       INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
 
-       if (addr_mask != NULL)
-               for (i = 0; i < ETH_ALEN; i++)
-                       ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
+       init_waitqueue_head(&ifp->pend_8021x_wait);
+
+       if (mac_addr != NULL)
+               memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
 
        brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
                  current->pid, ifp->ndev->name, ifp->mac_addr);
@@ -714,19 +760,18 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
        return ifp;
 }
 
-void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
 {
        struct brcmf_if *ifp;
 
-       brcmf_dbg(TRACE, "idx %d\n", ifidx);
-
-       ifp = drvr->iflist[ifidx];
+       ifp = drvr->iflist[bssidx];
        if (!ifp) {
-               brcmf_err("Null interface\n");
+               brcmf_err("Null interface, idx=%d\n", bssidx);
                return;
        }
+       brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
        if (ifp->ndev) {
-               if (ifidx == 0) {
+               if (bssidx == 0) {
                        if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
                                rtnl_lock();
                                brcmf_netdev_stop(ifp->ndev);
@@ -736,12 +781,14 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
                        netif_stop_queue(ifp->ndev);
                }
 
-               cancel_work_sync(&ifp->setmacaddr_work);
-               cancel_work_sync(&ifp->multicast_work);
+               if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+                       cancel_work_sync(&ifp->setmacaddr_work);
+                       cancel_work_sync(&ifp->multicast_work);
+               }
 
                unregister_netdev(ifp->ndev);
-               drvr->iflist[ifidx] = NULL;
-               if (ifidx == 0)
+               drvr->iflist[bssidx] = NULL;
+               if (bssidx == 0)
                        brcmf_cfg80211_detach(drvr->config);
                free_netdev(ifp->ndev);
        }
@@ -781,8 +828,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
 
        INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
 
-       init_waitqueue_head(&drvr->pend_8021x_wait);
-
        return ret;
 
 fail:
@@ -797,6 +842,7 @@ int brcmf_bus_start(struct device *dev)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_if *ifp;
+       struct brcmf_if *p2p_ifp;
 
        brcmf_dbg(TRACE, "\n");
 
@@ -812,6 +858,13 @@ int brcmf_bus_start(struct device *dev)
        if (IS_ERR(ifp))
                return PTR_ERR(ifp);
 
+       if (brcmf_p2p_enable)
+               p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
+       else
+               p2p_ifp = NULL;
+       if (IS_ERR(p2p_ifp))
+               p2p_ifp = NULL;
+
        /* signal bus ready */
        bus_if->state = BRCMF_BUS_DATA;
 
@@ -830,16 +883,22 @@ int brcmf_bus_start(struct device *dev)
        if (ret < 0)
                goto fail;
 
-       ret = brcmf_net_attach(ifp);
+       ret = brcmf_net_attach(ifp, false);
 fail:
        if (ret < 0) {
                brcmf_err("failed: %d\n", ret);
                if (drvr->config)
                        brcmf_cfg80211_detach(drvr->config);
-               free_netdev(drvr->iflist[0]->ndev);
+               free_netdev(ifp->ndev);
                drvr->iflist[0] = NULL;
+               if (p2p_ifp) {
+                       free_netdev(p2p_ifp->ndev);
+                       drvr->iflist[1] = NULL;
+               }
                return ret;
        }
+       if ((brcmf_p2p_enable) && (p2p_ifp))
+               brcmf_net_p2p_attach(p2p_ifp);
 
        return 0;
 }
@@ -865,12 +924,13 @@ void brcmf_dev_reset(struct device *dev)
        if (drvr == NULL)
                return;
 
-       brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
+       if (drvr->iflist[0])
+               brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
 }
 
 void brcmf_detach(struct device *dev)
 {
-       int i;
+       s32 i;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
 
@@ -897,19 +957,18 @@ void brcmf_detach(struct device *dev)
        kfree(drvr);
 }
 
-static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
 {
-       return atomic_read(&drvr->pend_8021x_cnt);
+       return atomic_read(&ifp->pend_8021x_cnt);
 }
 
 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
-       struct brcmf_pub *drvr = ifp->drvr;
        int err;
 
-       err = wait_event_timeout(drvr->pend_8021x_wait,
-                                !brcmf_get_pend_8021x_cnt(drvr),
+       err = wait_event_timeout(ifp->pend_8021x_wait,
+                                !brcmf_get_pend_8021x_cnt(ifp),
                                 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
 
        WARN_ON(!err);
@@ -917,6 +976,16 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
        return !err;
 }
 
+/*
+ * return chip id and rev of the device encoded in u32.
+ */
+u32 brcmf_get_chip_info(struct brcmf_if *ifp)
+{
+       struct brcmf_bus *bus = ifp->drvr->bus_if;
+
+       return bus->chip << 4 | bus->chiprev;
+}
+
 static void brcmf_driver_init(struct work_struct *work)
 {
        brcmf_debugfs_init();
index 7fef9b5..4469321 100644 (file)
@@ -1096,7 +1096,6 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
        if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
            type != BRCMF_SDIO_FT_SUPER) {
                brcmf_err("HW header length too long\n");
-               bus->sdiodev->bus_if->dstats.rx_errors++;
                bus->sdcnt.rx_toolong++;
                brcmf_sdbrcm_rxfail(bus, false, false);
                rd->len = 0;
@@ -1298,7 +1297,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                if (errcode < 0) {
                        brcmf_err("glom read of %d bytes failed: %d\n",
                                  dlen, errcode);
-                       bus->sdiodev->bus_if->dstats.rx_errors++;
 
                        sdio_claim_host(bus->sdiodev->func[1]);
                        if (bus->glomerr++ < 3) {
@@ -1445,10 +1443,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 
        if (bus->rxblen)
                buf = vzalloc(bus->rxblen);
-       if (!buf) {
-               brcmf_err("no memory for control frame\n");
+       if (!buf)
                goto done;
-       }
+
        rbuf = bus->rxbuf;
        pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
        if (pad)
@@ -1478,7 +1475,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
        if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
                brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
                          rdlen, bus->sdiodev->bus_if->maxctl);
-               bus->sdiodev->bus_if->dstats.rx_errors++;
                brcmf_sdbrcm_rxfail(bus, false, false);
                goto done;
        }
@@ -1486,7 +1482,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
        if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
                brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
                          len, len - doff, bus->sdiodev->bus_if->maxctl);
-               bus->sdiodev->bus_if->dstats.rx_errors++;
                bus->sdcnt.rx_toolong++;
                brcmf_sdbrcm_rxfail(bus, false, false);
                goto done;
@@ -1634,7 +1629,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
                if (!pkt) {
                        /* Give up on data, request rtx of events */
                        brcmf_err("brcmu_pkt_buf_get_skb failed\n");
-                       bus->sdiodev->bus_if->dstats.rx_dropped++;
                        brcmf_sdbrcm_rxfail(bus, false,
                                            RETRYCHAN(rd->channel));
                        sdio_release_host(bus->sdiodev->func[1]);
@@ -1652,7 +1646,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
                        brcmf_err("read %d bytes from channel %d failed: %d\n",
                                  rd->len, rd->channel, sdret);
                        brcmu_pkt_buf_free_skb(pkt);
-                       bus->sdiodev->bus_if->dstats.rx_errors++;
                        sdio_claim_host(bus->sdiodev->func[1]);
                        brcmf_sdbrcm_rxfail(bus, true,
                                            RETRYCHAN(rd->channel));
@@ -1940,10 +1933,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                datalen = pkt->len - SDPCM_HDRLEN;
 
                ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
-               if (ret)
-                       bus->sdiodev->bus_if->dstats.tx_errors++;
-               else
-                       bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
 
                /* In poll mode, need to check for other events */
                if (!bus->intr && cnt) {
@@ -1962,8 +1951,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        }
 
        /* Deflow-control stack if needed */
-       if (bus->sdiodev->bus_if->drvr_up &&
-           (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+       if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
            bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
                bus->txoff = false;
                brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2710,9 +2698,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
         * address of sdpcm_shared structure
         */
        sdio_claim_host(bus->sdiodev->func[1]);
+       brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
        rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
                                   (u8 *)&addr_le, 4);
-       sdio_claim_host(bus->sdiodev->func[1]);
+       sdio_release_host(bus->sdiodev->func[1]);
        if (rv < 0)
                return rv;
 
@@ -2731,10 +2720,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
        }
 
        /* Read hndrte_shared structure */
-       sdio_claim_host(bus->sdiodev->func[1]);
        rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
                                   sizeof(struct sdpcm_shared_le));
-       sdio_release_host(bus->sdiodev->func[1]);
        if (rv < 0)
                return rv;
 
@@ -2836,14 +2823,12 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
        if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
                return 0;
 
-       sdio_claim_host(bus->sdiodev->func[1]);
        error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
                                      sizeof(struct brcmf_trap_info));
        if (error < 0)
                return error;
 
        nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
-       sdio_release_host(bus->sdiodev->func[1]);
        if (nbytes < 0)
                return nbytes;
 
@@ -3308,9 +3293,6 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
 {
        int ret;
 
-       if (bus->sdiodev->bus_if->drvr_up)
-               return -EISCONN;
-
        ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
                               &bus->sdiodev->func[2]->dev);
        if (ret) {
@@ -3941,6 +3923,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        /* Assign bus interface call back */
        bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
        bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
+       bus->sdiodev->bus_if->chip = bus->ci->chip;
+       bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
 
        /* Attach to the brcmf/OS/network interface */
        ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
index ba0b225..e9d6f91 100644 (file)
@@ -189,24 +189,24 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                return;
        }
 
-       ifp = drvr->iflist[ifevent->ifidx];
+       ifp = drvr->iflist[ifevent->bssidx];
 
        if (ifevent->action == BRCMF_E_IF_ADD) {
                brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
                          emsg->addr);
-               ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
+               ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
                                   emsg->ifname, emsg->addr);
                if (IS_ERR(ifp))
                        return;
 
                if (!drvr->fweh.evt_handler[BRCMF_E_IF])
-                       err = brcmf_net_attach(ifp);
+                       err = brcmf_net_attach(ifp, false);
        }
 
        err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
        if (ifevent->action == BRCMF_E_IF_DEL)
-               brcmf_del_if(drvr, ifevent->ifidx);
+               brcmf_del_if(drvr, ifevent->bssidx);
 }
 
 /**
@@ -250,8 +250,6 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
        drvr = container_of(fweh, struct brcmf_pub, fweh);
 
        while ((event = brcmf_fweh_dequeue_event(fweh))) {
-               ifp = drvr->iflist[event->ifidx];
-
                brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
                          brcmf_fweh_event_name(event->code), event->code,
                          event->emsg.ifidx, event->emsg.bsscfgidx,
@@ -283,6 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
                        goto event_free;
                }
 
+               ifp = drvr->iflist[emsg.bsscfgidx];
                err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
                                                    event->data);
                if (err) {
index 36901f7..8c39b51 100644 (file)
@@ -83,6 +83,7 @@ struct brcmf_event;
        BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
        BRCMF_ENUM_DEF(TRACE, 52) \
        BRCMF_ENUM_DEF(IF, 54) \
+       BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
        BRCMF_ENUM_DEF(RSSI, 56) \
        BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
        BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
@@ -96,8 +97,11 @@ struct brcmf_event;
        BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
        BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
        BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
+       BRCMF_ENUM_DEF(PROBERESP_MSG, 71) \
+       BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
        BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
-       BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74)
+       BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
+       BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
 
 #define BRCMF_ENUM_DEF(id, val) \
        BRCMF_E_##id = (val),
index d8d8b65..8d1def9 100644 (file)
@@ -45,9 +45,10 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
        if (data != NULL)
                len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
        if (set)
-               err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+               err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
+                                              len);
        else
-               err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+               err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
                                                 len);
 
        if (err >= 0)
@@ -100,6 +101,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
        __le32 data_le = cpu_to_le32(data);
 
        mutex_lock(&ifp->drvr->proto_block);
+       brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
        err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
        mutex_unlock(&ifp->drvr->proto_block);
 
@@ -116,6 +118,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
        err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
        mutex_unlock(&ifp->drvr->proto_block);
        *data = le32_to_cpu(data_le);
+       brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);
 
        return err;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
new file mode 100644 (file)
index 0000000..0f2c83b
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWIL_TYPES_H_
+#define FWIL_TYPES_H_
+
+#include <linux/if_ether.h>
+
+
+#define BRCMF_FIL_ACTION_FRAME_SIZE    1800
+
+
+enum brcmf_fil_p2p_if_types {
+       BRCMF_FIL_P2P_IF_CLIENT,
+       BRCMF_FIL_P2P_IF_GO,
+       BRCMF_FIL_P2P_IF_DYNBCN_GO,
+       BRCMF_FIL_P2P_IF_DEV,
+};
+
+struct brcmf_fil_p2p_if_le {
+       u8 addr[ETH_ALEN];
+       __le16 type;
+       __le16 chspec;
+};
+
+struct brcmf_fil_chan_info_le {
+       __le32 hw_channel;
+       __le32 target_channel;
+       __le32 scan_channel;
+};
+
+struct brcmf_fil_action_frame_le {
+       u8      da[ETH_ALEN];
+       __le16  len;
+       __le32  packet_id;
+       u8      data[BRCMF_FIL_ACTION_FRAME_SIZE];
+};
+
+struct brcmf_fil_af_params_le {
+       __le32                                  channel;
+       __le32                                  dwell_time;
+       u8                                      bssid[ETH_ALEN];
+       u8                                      pad[2];
+       struct brcmf_fil_action_frame_le        action_frame;
+};
+
+struct brcmf_fil_bss_enable_le {
+       __le32 bsscfg_idx;
+       __le32 enable;
+};
+
+#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
new file mode 100644 (file)
index 0000000..4166e64
--- /dev/null
@@ -0,0 +1,2277 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include "fwil.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "wl_cfg80211.h"
+
+/* parameters used for p2p escan */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
+#define BRCMF_P2P_WILDCARD_SSID                "DIRECT-"
+#define BRCMF_P2P_WILDCARD_SSID_LEN    (sizeof(BRCMF_P2P_WILDCARD_SSID) - 1)
+
+#define SOCIAL_CHAN_1          1
+#define SOCIAL_CHAN_2          6
+#define SOCIAL_CHAN_3          11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+                                        (channel == SOCIAL_CHAN_2) || \
+                                        (channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT                3
+#define AF_PEER_SEARCH_CNT     2
+
+#define BRCMF_SCB_TIMEOUT_VALUE        20
+
+#define P2P_VER                        9       /* P2P version: 9=WiFi P2P v1.0 */
+#define P2P_PUB_AF_CATEGORY    0x04
+#define P2P_PUB_AF_ACTION      0x09
+#define P2P_AF_CATEGORY                0x7f
+#define P2P_OUI                        "\x50\x6F\x9A"  /* P2P OUI */
+#define P2P_OUI_LEN            3               /* P2P OUI length */
+
+/* Action Frame Constants */
+#define DOT11_ACTION_HDR_LEN   2       /* action frame category + action */
+#define DOT11_ACTION_CAT_OFF   0       /* category offset */
+#define DOT11_ACTION_ACT_OFF   1       /* action offset */
+
+#define P2P_AF_DWELL_TIME              200
+#define P2P_AF_MIN_DWELL_TIME          100
+#define P2P_AF_MED_DWELL_TIME          400
+#define P2P_AF_LONG_DWELL_TIME         1000
+#define P2P_AF_TX_MAX_RETRY            1
+#define P2P_AF_MAX_WAIT_TIME           2000
+#define P2P_INVALID_CHANNEL            -1
+#define P2P_CHANNEL_SYNC_RETRY         5
+#define P2P_AF_FRM_SCAN_MAX_WAIT       1500
+#define P2P_DEFAULT_SLEEP_TIME_VSDB    200
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ                0       /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP                1       /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF       2       /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ     3       /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP     4       /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ     5       /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP     6       /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ    7       /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP    8       /* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID        255     /* Invalid Subtype */
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE       0       /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ            1       /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP            2       /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ             3       /* GO Discoverability Request */
+
+/* P2P Service Discovery related */
+#define P2PSD_ACTION_CATEGORY          0x04    /* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ       0x0a    /* GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP      0x0b    /* GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ       0x0c    /* GAS Comback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP      0x0d    /* GAS Comback Response AF */
+
+/**
+ * struct brcmf_p2p_disc_st_le - set discovery state in firmware.
+ *
+ * @state: requested discovery state (see enum brcmf_p2p_disc_state).
+ * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state.
+ * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state.
+ */
+struct brcmf_p2p_disc_st_le {
+       u8 state;
+       __le16 chspec;
+       __le16 dwell;
+};
+
+/**
+ * enum brcmf_p2p_disc_state - P2P discovery state values
+ *
+ * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE.
+ * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time.
+ * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE.
+ */
+enum brcmf_p2p_disc_state {
+       WL_P2P_DISC_ST_SCAN,
+       WL_P2P_DISC_ST_LISTEN,
+       WL_P2P_DISC_ST_SEARCH
+};
+
+/**
+ * struct brcmf_p2p_scan_le - P2P specific scan request.
+ *
+ * @type: type of scan method requested (values: 'E' or 'S').
+ * @reserved: reserved (ignored).
+ * @eparams: parameters used for type 'E'.
+ * @sparams: parameters used for type 'S'.
+ */
+struct brcmf_p2p_scan_le {
+       u8 type;
+       u8 reserved[3];
+       union {
+               struct brcmf_escan_params_le eparams;
+               struct brcmf_scan_params_le sparams;
+       };
+};
+
+/**
+ * struct brcmf_p2p_pub_act_frame - WiFi P2P Public Action Frame
+ *
+ * @category: P2P_PUB_AF_CATEGORY
+ * @action: P2P_PUB_AF_ACTION
+ * @oui[3]: P2P_OUI
+ * @oui_type: OUI type - P2P_VER
+ * @subtype: OUI subtype - P2P_TYPE_*
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_pub_act_frame {
+       u8      category;
+       u8      action;
+       u8      oui[3];
+       u8      oui_type;
+       u8      subtype;
+       u8      dialog_token;
+       u8      elts[1];
+};
+
+/**
+ * struct brcmf_p2p_action_frame - WiFi P2P Action Frame
+ *
+ * @category: P2P_AF_CATEGORY
+ * @OUI[3]: OUI - P2P_OUI
+ * @type: OUI Type - P2P_VER
+ * @subtype: OUI Subtype - P2P_AF_*
+ * @dialog_token: nonzero, identifies req/resp tranaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_action_frame {
+       u8      category;
+       u8      oui[3];
+       u8      type;
+       u8      subtype;
+       u8      dialog_token;
+       u8      elts[1];
+};
+
+/**
+ * struct brcmf_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame
+ *
+ * @category: 0x04 Public Action Frame
+ * @action: 0x6c Advertisement Protocol
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @query_data[1]: Query Data. SD gas ireq SD gas iresp
+ */
+struct brcmf_p2psd_gas_pub_act_frame {
+       u8      category;
+       u8      action;
+       u8      dialog_token;
+       u8      query_data[1];
+};
+
+/**
+ * struct brcmf_config_af_params - Action Frame Parameters for tx.
+ *
+ * @mpc_onoff: To make sure to send successfully action frame, we have to
+ *             turn off mpc  0: off, 1: on,  (-1): do nothing
+ * @search_channel: 1: search peer's channel to send af
+ * extra_listen: keep the dwell time to get af response frame.
+ */
+struct brcmf_config_af_params {
+       s32 mpc_onoff;
+       bool search_channel;
+       bool extra_listen;
+};
+
+/**
+ * brcmf_p2p_is_pub_action() - true if p2p public type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p public action type
+ */
+static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
+{
+       struct brcmf_p2p_pub_act_frame *pact_frm;
+
+       if (frame == NULL)
+               return false;
+
+       pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+       if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+               return false;
+
+       if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+           pact_frm->action == P2P_PUB_AF_ACTION &&
+           pact_frm->oui_type == P2P_VER &&
+           memcmp(pact_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+               return true;
+
+       return false;
+}
+
+/**
+ * brcmf_p2p_is_p2p_action() - true if p2p action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p action type
+ */
+static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
+{
+       struct brcmf_p2p_action_frame *act_frm;
+
+       if (frame == NULL)
+               return false;
+
+       act_frm = (struct brcmf_p2p_action_frame *)frame;
+       if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+               return false;
+
+       if (act_frm->category == P2P_AF_CATEGORY &&
+           act_frm->type  == P2P_VER &&
+           memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+               return true;
+
+       return false;
+}
+
+/**
+ * brcmf_p2p_is_gas_action() - true if p2p gas action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p gas action type
+ */
+static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
+{
+       struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+       if (frame == NULL)
+               return false;
+
+       sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+       if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+               return false;
+
+       if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+               return false;
+
+       if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+           sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+           sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+           sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+               return true;
+
+       return false;
+}
+
+/**
+ * brcmf_p2p_print_actframe() - debug print routine.
+ *
+ * @tx: Received or to be transmitted
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Print information about the p2p action frame
+ */
+
+#ifdef DEBUG
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+       struct brcmf_p2p_pub_act_frame *pact_frm;
+       struct brcmf_p2p_action_frame *act_frm;
+       struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+       if (!frame || frame_len <= 2)
+               return;
+
+       if (brcmf_p2p_is_pub_action(frame, frame_len)) {
+               pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+               switch (pact_frm->subtype) {
+               case P2P_PAF_GON_REQ:
+                       brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_GON_RSP:
+                       brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_GON_CONF:
+                       brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_INVITE_REQ:
+                       brcmf_dbg(TRACE, "%s P2P Invitation Request  Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_INVITE_RSP:
+                       brcmf_dbg(TRACE, "%s P2P Invitation Response Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_DEVDIS_REQ:
+                       brcmf_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_DEVDIS_RSP:
+                       brcmf_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_PROVDIS_REQ:
+                       brcmf_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_PAF_PROVDIS_RSP:
+                       brcmf_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               default:
+                       brcmf_dbg(TRACE, "%s Unknown P2P Public Action Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               }
+       } else if (brcmf_p2p_is_p2p_action(frame, frame_len)) {
+               act_frm = (struct brcmf_p2p_action_frame *)frame;
+               switch (act_frm->subtype) {
+               case P2P_AF_NOTICE_OF_ABSENCE:
+                       brcmf_dbg(TRACE, "%s P2P Notice of Absence Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_AF_PRESENCE_REQ:
+                       brcmf_dbg(TRACE, "%s P2P Presence Request Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_AF_PRESENCE_RSP:
+                       brcmf_dbg(TRACE, "%s P2P Presence Response Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2P_AF_GO_DISC_REQ:
+                       brcmf_dbg(TRACE, "%s P2P Discoverability Request Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               default:
+                       brcmf_dbg(TRACE, "%s Unknown P2P Action Frame\n",
+                                 (tx) ? "TX" : "RX");
+               }
+
+       } else if (brcmf_p2p_is_gas_action(frame, frame_len)) {
+               sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+               switch (sd_act_frm->action) {
+               case P2PSD_ACTION_ID_GAS_IREQ:
+                       brcmf_dbg(TRACE, "%s P2P GAS Initial Request\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2PSD_ACTION_ID_GAS_IRESP:
+                       brcmf_dbg(TRACE, "%s P2P GAS Initial Response\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2PSD_ACTION_ID_GAS_CREQ:
+                       brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               case P2PSD_ACTION_ID_GAS_CRESP:
+                       brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               default:
+                       brcmf_dbg(TRACE, "%s Unknown P2P GAS Frame\n",
+                                 (tx) ? "TX" : "RX");
+                       break;
+               }
+       }
+}
+
+#else
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+       /* tracing support compiled out: intentionally a no-op */
+}
+
+#endif
+
+
+/**
+ * brcmf_p2p_chnr_to_chspec() - convert channel number to chanspec.
+ *
+ * @channel: channel number
+ */
+static u16 brcmf_p2p_chnr_to_chspec(u16 channel)
+{
+       /* start from the masked channel number with 20MHz width and no
+        * control sideband, then tag on the matching band.
+        */
+       u16 chspec = (channel & WL_CHANSPEC_CHAN_MASK) |
+                    WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+
+       if (channel <= CH_MAX_2G_CHANNEL)
+               chspec |= WL_CHANSPEC_BAND_2G;
+       else
+               chspec |= WL_CHANSPEC_BAND_5G;
+
+       return chspec;
+}
+
+
+/**
+ * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation.
+ *
+ * @ifp: ifp to use for iovars (primary).
+ * @p2p_mac: mac address to configure for p2p_da_override
+ */
+static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
+{
+       s32 ret;
+
+       /* Enable concurrent STA/AP ("apsta") operation.  The original code
+        * ignored this iovar's result entirely; at least log a failure so
+        * it is diagnosable.  NOTE(review): confirm whether a hard failure
+        * here should abort P2P setup.
+        */
+       ret = brcmf_fil_iovar_int_set(ifp, "apsta", 1);
+       if (ret)
+               brcmf_err("failed to set apsta ret %d\n", ret);
+
+       /* In case of COB type, firmware has default mac address
+        * After Initializing firmware, we have to set current mac address to
+        * firmware for P2P device address
+        */
+       ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac,
+                                      ETH_ALEN);
+       if (ret)
+               brcmf_err("failed to update device address ret %d\n", ret);
+
+       return ret;
+}
+
+/**
+ * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P.
+ *
+ * @p2p: P2P specific data.
+ *
+ * P2P needs mac addresses for P2P device and interface. These are
+ * derived from the primary net device, ie. the permanent ethernet
+ * address of the device.
+ */
+static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p)
+{
+       struct brcmf_if *pri = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+       struct brcmf_if *dev = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+
+       /* P2P Device Address: the primary interface's MAC address with
+        * the locally administered bit turned on.
+        */
+       memcpy(p2p->dev_addr, pri->mac_addr, ETH_ALEN);
+       p2p->dev_addr[0] |= 0x02;
+       memcpy(dev->mac_addr, p2p->dev_addr, ETH_ALEN);
+
+       /* P2P Interface Address: must differ from the device address so
+        * that discovery and connection bsscfgs can coexist, while staying
+        * locally administered; flip one bit in octet 4.
+        */
+       memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
+       p2p->int_addr[4] ^= 0x80;
+}
+
+/**
+ * brcmf_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan.
+ *
+ * @request: the scan request as received from cfg80211.
+ *
+ * returns true if one of the ssids in the request matches the
+ * P2P wildcard ssid; otherwise returns false.
+ */
+static bool brcmf_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request)
+{
+       struct cfg80211_ssid *ssid;
+       int i;
+
+       /* the request is a P2P scan iff one of its ssids equals the
+        * P2P wildcard ssid
+        */
+       for (i = 0; i < request->n_ssids; i++) {
+               ssid = &request->ssids[i];
+               if (ssid->ssid_len != BRCMF_P2P_WILDCARD_SSID_LEN)
+                       continue;
+
+               brcmf_dbg(INFO, "comparing ssid \"%s\"", ssid->ssid);
+               if (!memcmp(BRCMF_P2P_WILDCARD_SSID, ssid->ssid,
+                           BRCMF_P2P_WILDCARD_SSID_LEN))
+                       return true;
+       }
+       return false;
+}
+
+/**
+ * brcmf_p2p_set_discover_state - set discover state in firmware.
+ *
+ * @ifp: low-level interface object.
+ * @state: discover state to set.
+ * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only).
+ * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only).
+ */
+static s32 brcmf_p2p_set_discover_state(struct brcmf_if *ifp, u8 state,
+                                       u16 chanspec, u16 listen_ms)
+{
+       /* build the firmware's discovery-state structure in one go */
+       struct brcmf_p2p_disc_st_le discover_state = {
+               .state = state,
+               .chspec = cpu_to_le16(chanspec),
+               .dwell = cpu_to_le16(listen_ms),
+       };
+
+       brcmf_dbg(TRACE, "enter\n");
+       return brcmf_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state,
+                                        sizeof(discover_state));
+}
+
+/**
+ * brcmf_p2p_deinit_discovery() - disable P2P device discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Resets the discovery state and disables it in firmware.
+ */
+static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p)
+{
+       struct brcmf_if *ifp;
+
+       brcmf_dbg(TRACE, "enter\n");
+
+       /* first park the discovery bsscfg back in SCAN state... */
+       ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+       (void)brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+
+       /* ...then switch discovery off entirely in the firmware; both
+        * calls are best-effort, so failures are deliberately ignored.
+        */
+       ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+       (void)brcmf_fil_iovar_int_set(ifp, "p2p_disc", 0);
+
+       return 0;
+}
+
+/**
+ * brcmf_p2p_enable_discovery() - initialize and configure discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Initializes the discovery device and configure the virtual interface.
+ */
+static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
+{
+       struct brcmf_cfg80211_vif *vif;
+       s32 ret = 0;
+
+       brcmf_dbg(TRACE, "enter\n");
+       /* discovery state lives on the dedicated device bsscfg */
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+       if (!vif) {
+               brcmf_err("P2P config device not available\n");
+               ret = -EPERM;
+               goto exit;
+       }
+
+       /* idempotent: a repeated call is a successful no-op (ret stays 0) */
+       if (test_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status)) {
+               brcmf_dbg(INFO, "P2P config device already configured\n");
+               goto exit;
+       }
+
+       /* Re-initialize P2P Discovery in the firmware */
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+       ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1);
+       if (ret < 0) {
+               brcmf_err("set p2p_disc error\n");
+               goto exit;
+       }
+       /* "p2p_disc" goes via the primary vif; the discovery state is
+        * then programmed on the device vif
+        */
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+       ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+       if (ret < 0) {
+               brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n");
+               goto exit;
+       }
+
+       /*
+        * Set wsec to any non-zero value in the discovery bsscfg
+        * to ensure our P2P probe responses have the privacy bit
+        * set in the 802.11 WPA IE. Some peer devices may not
+        * initiate WPS with us if this bit is not set.
+        */
+       ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED);
+       if (ret < 0) {
+               brcmf_err("wsec error %d\n", ret);
+               goto exit;
+       }
+
+       set_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status);
+exit:
+       return ret;
+}
+
+/**
+ * brcmf_p2p_escan() - initiate a P2P scan.
+ *
+ * @p2p: P2P specific data.
+ * @num_chans: number of channels to scan.
+ * @chanspecs: channel parameters for @num_chans channels.
+ * @search_state: P2P discover state to use.
+ * @action: scan action to pass to firmware.
+ * @bss_type: type of P2P bss.
+ */
+static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
+                          u16 chanspecs[], s32 search_state, u16 action,
+                          enum p2p_bss_type bss_type)
+{
+       s32 ret = 0;
+       s32 memsize = offsetof(struct brcmf_p2p_scan_le,
+                              eparams.params_le.channel_list);
+       s32 nprobes;
+       s32 active;
+       u32 i;
+       u8 *memblk;
+       struct brcmf_cfg80211_vif *vif;
+       struct brcmf_p2p_scan_le *p2p_params;
+       struct brcmf_scan_params_le *sparams;
+       struct brcmf_ssid ssid;
+
+       /* the fixed parameter block is followed by the channel list */
+       memsize += num_chans * sizeof(__le16);
+       memblk = kzalloc(memsize, GFP_KERNEL);
+       if (!memblk)
+               return -ENOMEM;
+
+       vif = p2p->bss_idx[bss_type].vif;
+       if (vif == NULL) {
+               brcmf_err("no vif for bss type %d\n", bss_type);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       switch (search_state) {
+       case WL_P2P_DISC_ST_SEARCH:
+               /*
+                * If we are in SEARCH STATE, we don't need to set SSID
+                * explicitly because the dongle uses P2P WILDCARD
+                * internally by default
+                */
+               /* use null ssid */
+               ssid.SSID_len = 0;
+               memset(ssid.SSID, 0, sizeof(ssid.SSID));
+               break;
+       case WL_P2P_DISC_ST_SCAN:
+               /*
+                * wpa_supplicant has p2p_find command with type social or
+                * progressive. For progressive, we need to set the ssid to
+                * P2P WILDCARD because we just do broadcast scan unless
+                * setting SSID.
+                */
+               ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN;
+               memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len);
+               break;
+       default:
+               brcmf_err(" invalid search state %d\n", search_state);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       brcmf_p2p_set_discover_state(vif->ifp, search_state, 0, 0);
+
+       /*
+        * set p2p scan parameters.
+        */
+       p2p_params = (struct brcmf_p2p_scan_le *)memblk;
+       /* 'E' marks this parameter block as an escan request */
+       p2p_params->type = 'E';
+
+       /* determine the scan engine parameters */
+       sparams = &p2p_params->eparams.params_le;
+       sparams->bss_type = DOT11_BSSTYPE_ANY;
+       /* scan_type: 0 = active scan, 1 = passive */
+       if (p2p->cfg->active_scan)
+               sparams->scan_type = 0;
+       else
+               sparams->scan_type = 1;
+
+       /* wildcard (broadcast) BSSID */
+       memset(&sparams->bssid, 0xFF, ETH_ALEN);
+       if (ssid.SSID_len)
+               memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
+       sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+       sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS);
+
+       /*
+        * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan
+        * supported by the supplicant.
+        */
+       if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1))
+               active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+       else if (num_chans == AF_PEER_SEARCH_CNT)
+               active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+       else if (wl_get_vif_state_all(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
+               active = -1;
+       else
+               active = P2PAPI_SCAN_DWELL_TIME_MS;
+
+       /* Override scan params to find a peer for a connection */
+       if (num_chans == 1) {
+               active = WL_SCAN_CONNECT_DWELL_TIME_MS;
+               /* WAR to sync with presence period of VSDB GO.
+                * send probe request more frequently
+                */
+               nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+       } else {
+               nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS;
+       }
+
+       if (nprobes <= 0)
+               nprobes = 1;
+
+       brcmf_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active);
+       sparams->active_time = cpu_to_le32(active);
+       sparams->nprobes = cpu_to_le32(nprobes);
+       /* -1 presumably selects the firmware default dwell — TODO confirm */
+       sparams->passive_time = cpu_to_le32(-1);
+       sparams->channel_num = cpu_to_le32(num_chans &
+                                          BRCMF_SCAN_PARAMS_COUNT_MASK);
+       for (i = 0; i < num_chans; i++)
+               sparams->channel_list[i] = cpu_to_le16(chanspecs[i]);
+
+       /* set the escan specific parameters */
+       p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+       p2p_params->eparams.action =  cpu_to_le16(action);
+       /* fixed sync id, echoed back in escan result events */
+       p2p_params->eparams.sync_id = cpu_to_le16(0x1234);
+       /* perform p2p scan on primary device */
+       ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize);
+       if (!ret)
+               set_bit(BRCMF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status);
+exit:
+       kfree(memblk);
+       return ret;
+}
+
+/**
+ * brcmf_p2p_run_escan() - escan callback for peer-to-peer.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device for which scan is requested.
+ * @request: scan request from cfg80211.
+ * @action: scan action.
+ *
+ * Determines the P2P discovery state based to scan request parameters and
+ * validates the channels in the request.
+ */
+static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
+                              struct net_device *ndev,
+                              struct cfg80211_scan_request *request,
+                              u16 action)
+{
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       s32 err = 0;
+       s32 search_state = WL_P2P_DISC_ST_SCAN;
+       struct brcmf_cfg80211_vif *vif;
+       struct net_device *dev = NULL;
+       int i, num_nodfs = 0;
+       u16 *chanspecs;
+
+       brcmf_dbg(TRACE, "enter\n");
+
+       if (!request) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       if (request->n_channels) {
+               chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs),
+                                   GFP_KERNEL);
+               if (!chanspecs) {
+                       err = -ENOMEM;
+                       goto exit;
+               }
+               vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+               if (vif)
+                       dev = vif->wdev.netdev;
+               if (request->n_channels == 3 &&
+                   request->channels[0]->hw_value == SOCIAL_CHAN_1 &&
+                   request->channels[1]->hw_value == SOCIAL_CHAN_2 &&
+                   request->channels[2]->hw_value == SOCIAL_CHAN_3) {
+                       /* SOCIAL CHANNELS 1, 6, 11 */
+                       search_state = WL_P2P_DISC_ST_SEARCH;
+                       brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+               } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+                       /* If you are already a GO, then do SEARCH only */
+                       brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+                       search_state = WL_P2P_DISC_ST_SEARCH;
+               } else {
+                       brcmf_dbg(INFO, "P2P SCAN STATE START\n");
+               }
+
+               /*
+                * no P2P scanning on passive or DFS channels.
+                */
+               for (i = 0; i < request->n_channels; i++) {
+                       struct ieee80211_channel *chan = request->channels[i];
+
+                       if (chan->flags & (IEEE80211_CHAN_RADAR |
+                                          IEEE80211_CHAN_PASSIVE_SCAN))
+                               continue;
+
+                       /* index by num_nodfs, not i: skipped channels must
+                        * not leave zero-filled holes in the list passed to
+                        * the firmware as num_nodfs entries.
+                        */
+                       chanspecs[num_nodfs] = channel_to_chanspec(chan);
+                       brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n",
+                                 num_nodfs, chan->hw_value,
+                                 chanspecs[num_nodfs]);
+                       num_nodfs++;
+               }
+               err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
+                                     action, P2PAPI_BSSCFG_DEVICE);
+               /* the original leaked chanspecs; escan has copied what it
+                * needs, so release the list here
+                */
+               kfree(chanspecs);
+       }
+exit:
+       if (err)
+               brcmf_err("error (%d)\n", err);
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_find_listen_channel() - find listen channel in ie string.
+ *
+ * @ie: string of information elements.
+ * @ie_len: length of string.
+ *
+ * Scan ie for p2p ie and look for attribute 6 channel. If available determine
+ * channel and return it.
+ */
+static s32 brcmf_p2p_find_listen_channel(const u8 *ie, u32 ie_len)
+{
+       u8 channel_ie[5];
+       s32 chan;
+       s32 err;
+
+       err = cfg80211_get_p2p_attr(ie, ie_len,
+                                   IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+                                   channel_ie, sizeof(channel_ie));
+       if (err < 0)
+               return err;
+
+       /* listen channel attribute layout:          */
+       /* 3(country) + 1(op. class) + 1(chan num)   */
+       chan = (s32)channel_ie[4];
+
+       /* only the social channels are acceptable listen channels */
+       switch (chan) {
+       case SOCIAL_CHAN_1:
+       case SOCIAL_CHAN_2:
+       case SOCIAL_CHAN_3:
+               brcmf_dbg(INFO, "Found my Listen Channel %d\n", chan);
+               return chan;
+       default:
+               return -EPERM;
+       }
+}
+
+
+/**
+ * brcmf_p2p_scan_prep() - prepare scan based on request.
+ *
+ * @wiphy: wiphy device.
+ * @request: scan request from cfg80211.
+ * @vif: vif on which scan request is to be executed.
+ *
+ * Prepare the scan appropriately for type of scan requested. Overrides the
+ * escan .run() callback for peer-to-peer scanning.
+ */
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+                       struct cfg80211_scan_request *request,
+                       struct brcmf_cfg80211_vif *vif)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       int err = 0;
+
+       if (brcmf_p2p_scan_is_p2p_request(request)) {
+               /* find my listen channel */
+               err = brcmf_p2p_find_listen_channel(request->ie,
+                                                   request->ie_len);
+               if (err < 0)
+                       return err;
+
+               /* on success, err holds the listen channel number */
+               p2p->afx_hdl.my_listen_chan = err;
+
+               clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+               brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+               err = brcmf_p2p_enable_discovery(p2p);
+               if (err)
+                       return err;
+
+               /* P2P scans run on the discovery device vif rather than
+                * the vif the request came in on
+                */
+               vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
+               /* override .run_escan() callback. */
+               cfg->escan_info.run = brcmf_p2p_run_escan;
+       }
+       err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+                                   request->ie, request->ie_len);
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_discover_listen() - set firmware to discover listen state.
+ *
+ * @p2p: p2p device.
+ * @channel: channel nr for discover listen.
+ * @duration: time in ms to stay on channel.
+ *
+ */
+static s32
+brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
+{
+       struct brcmf_cfg80211_vif *vif;
+       s32 err;
+
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+       if (!vif) {
+               brcmf_err("Discovery is not set, so we have nothing to do\n");
+               return -EPERM;
+       }
+
+       if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
+               brcmf_err("Previous LISTEN is not completed yet\n");
+               /* WAR: prevent cookie mismatch in wpa_supplicant return OK */
+               return 0;
+       }
+
+       /* switch the discovery bsscfg to LISTEN on the given channel */
+       err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+                                          brcmf_p2p_chnr_to_chspec(channel),
+                                          (u16)duration);
+       if (!err) {
+               set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
+               p2p->remain_on_channel_cookie++;
+       }
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_remain_on_channel() - put device on channel and stay there.
+ *
+ * @wiphy: wiphy device.
+ * @wdev: wireless device the request applies to.
+ * @channel: channel to stay on.
+ * @duration: time in ms to remain on channel.
+ * @cookie: out parameter; set to an identifier for this listen request.
+ *
+ */
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+                               struct ieee80211_channel *channel,
+                               unsigned int duration, u64 *cookie)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       s32 err;
+       u16 channel_nr;
+
+       channel_nr = ieee80211_frequency_to_channel(channel->center_freq);
+       brcmf_dbg(TRACE, "Enter, channel: %d, duration ms (%d)\n", channel_nr,
+                 duration);
+
+       err = brcmf_p2p_enable_discovery(p2p);
+       if (err)
+               goto exit;
+       err = brcmf_p2p_discover_listen(p2p, channel_nr, duration);
+       if (err)
+               goto exit;
+
+       /* remember channel and cookie so the listen-complete handler can
+        * report expiry; the cookie counter was advanced by discover_listen
+        */
+       memcpy(&p2p->remain_on_channel, channel, sizeof(*channel));
+       *cookie = p2p->remain_on_channel_cookie;
+       cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL);
+
+exit:
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_notify_listen_complete() - p2p listen has completed.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message. Not used.
+ *
+ */
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+                                    const struct brcmf_event_msg *e,
+                                    void *data)
+{
+       struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+       brcmf_dbg(TRACE, "Enter\n");
+       if (test_and_clear_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+                              &p2p->status)) {
+               /* a listen scheduled between action frames finished:
+                * wake the thread waiting in the af tx path
+                */
+               if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+                                      &p2p->status)) {
+                       clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+                                 &p2p->status);
+                       brcmf_dbg(INFO, "Listen DONE, wake up wait_next_af\n");
+                       complete(&p2p->wait_next_af);
+               }
+
+               /* report expiry of the user-requested remain-on-channel */
+               cfg80211_remain_on_channel_expired(&ifp->vif->wdev,
+                                                  p2p->remain_on_channel_cookie,
+                                                  &p2p->remain_on_channel,
+                                                  GFP_KERNEL);
+       }
+       return 0;
+}
+
+
+/**
+ * brcmf_p2p_cancel_remain_on_channel() - cancel p2p listen state.
+ *
+ * @ifp: interface control.
+ *
+ */
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
+{
+       if (ifp == NULL)
+               return;
+
+       /* drop the firmware back into SCAN state, then report the listen
+        * as complete so waiters and cfg80211 get notified
+        */
+       brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+       brcmf_p2p_notify_listen_complete(ifp, NULL, NULL);
+}
+
+
+/**
+ * brcmf_p2p_act_frm_search() - search function for action frame.
+ *
+ * @p2p: p2p device.
+ * @channel: channel on which the action frame is to be transmitted.
+ *
+ * search function to reach at common channel to send action frame. When
+ * channel is 0 then all social channels will be used to send af
+ */
+static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
+{
+       s32 err;
+       u32 channel_cnt;
+       u16 *default_chan_list;
+       u32 i;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       /* with a known peer listen channel, probe it repeatedly;
+        * otherwise cycle through the social channels (1, 6, 11)
+        */
+       if (channel)
+               channel_cnt = AF_PEER_SEARCH_CNT;
+       else
+               channel_cnt = SOCIAL_CHAN_CNT;
+       /* kcalloc() zeroes the buffer and checks count * size for
+        * overflow, unlike the open-coded kzalloc(n * size) it replaces
+        */
+       default_chan_list = kcalloc(channel_cnt, sizeof(*default_chan_list),
+                                   GFP_KERNEL);
+       if (default_chan_list == NULL) {
+               brcmf_err("channel list allocation failed\n");
+               err = -ENOMEM;
+               goto exit;
+       }
+       if (channel) {
+               /* insert same channel to the chan_list */
+               for (i = 0; i < channel_cnt; i++)
+                       default_chan_list[i] =
+                                       brcmf_p2p_chnr_to_chspec(channel);
+       } else {
+               default_chan_list[0] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_1);
+               default_chan_list[1] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_2);
+               default_chan_list[2] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_3);
+       }
+       err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list,
+                             WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START,
+                             P2PAPI_BSSCFG_DEVICE);
+       kfree(default_chan_list);
+exit:
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_afx_handler() - afx worker thread.
+ *
+ * @work: work structure embedded in struct afx_hdl.
+ *
+ */
+static void brcmf_p2p_afx_handler(struct work_struct *work)
+{
+       struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work);
+       struct brcmf_p2p_info *p2p = container_of(afx_hdl,
+                                                 struct brcmf_p2p_info,
+                                                 afx_hdl);
+       s32 err;
+
+       /* bail out if the search was cancelled before this work ran */
+       if (!afx_hdl->is_active)
+               return;
+
+       /* alternate between listening on our own channel and actively
+        * searching for the peer, as selected by is_listen
+        */
+       if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
+               /* randomized dwell: 100ms ~ 300ms */
+               err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
+                                               100 * (1 + (random32() % 3)));
+       else
+               err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
+
+       if (err) {
+               brcmf_err("ERROR occurred! value is (%d)\n", err);
+               /* wake the waiter so it can retry instead of timing out */
+               if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+                            &p2p->status))
+                       complete(&afx_hdl->act_frm_scan);
+       }
+}
+
+
+/**
+ * brcmf_p2p_af_searching_channel() - search channel.
+ *
+ * @p2p: p2p device info struct.
+ *
+ */
+static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+{
+       struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+       struct brcmf_cfg80211_vif *pri_vif;
+       unsigned long duration;
+       s32 retry;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+       INIT_COMPLETION(afx_hdl->act_frm_scan);
+       set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+       afx_hdl->is_active = true;
+       afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
+
+       /* Loop to wait until we find a peer's channel or the
+        * pending action frame tx is cancelled.
+        */
+       retry = 0;
+       duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
+       while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
+              (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
+               /* each iteration alternates: first search on the peer's
+                * listen channel, then (if known) listen on our own
+                */
+               afx_hdl->is_listen = false;
+               brcmf_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n",
+                         retry);
+               /* search peer on peer's listen channel */
+               schedule_work(&afx_hdl->afx_work);
+               wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+               if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+                   (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+                              &p2p->status)))
+                       break;
+
+               if (afx_hdl->my_listen_chan) {
+                       brcmf_dbg(TRACE, "Scheduling listen peer, channel=%d\n",
+                                 afx_hdl->my_listen_chan);
+                       /* listen on my listen channel */
+                       afx_hdl->is_listen = true;
+                       schedule_work(&afx_hdl->afx_work);
+                       wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+                                                   duration);
+               }
+               if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+                   (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+                              &p2p->status)))
+                       break;
+               retry++;
+
+               /* if sta is connected or connecting, sleep for a while before
+                * retry af tx or finding a peer
+                */
+               if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) ||
+                   test_bit(BRCMF_VIF_STATUS_CONNECTING, &pri_vif->sme_state))
+                       msleep(P2P_DEFAULT_SLEEP_TIME_VSDB);
+       }
+
+       brcmf_dbg(TRACE, "Completed search/listen peer_chan=%d\n",
+                 afx_hdl->peer_chan);
+       afx_hdl->is_active = false;
+
+       clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+
+       /* the found channel, or P2P_INVALID_CHANNEL after all retries */
+       return afx_hdl->peer_chan;
+}
+
+
+/**
+ * brcmf_p2p_scan_finding_common_channel() - was escan used for finding channel
+ *
+ * @cfg: common configuration struct.
+ * @bi: bss info struct, result from scan.
+ *
+ */
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+                                          struct brcmf_bss_info_le *bi)
+
+{
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+       u8 *ie;
+       s32 err;
+       u8 p2p_dev_addr[ETH_ALEN];
+
+       /* nothing to do unless an action-frame channel search is running */
+       if (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))
+               return false;
+
+       /* bi == NULL signals end-of-scan; wake the waiter if no hit yet */
+       if (bi == NULL) {
+               brcmf_dbg(TRACE, "ACTION FRAME SCAN Done\n");
+               if (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)
+                       complete(&afx_hdl->act_frm_scan);
+               return true;
+       }
+
+       ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+       memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr));
+       /* prefer the DEVICE_INFO attribute, fall back to DEVICE_ID */
+       err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+                                   IEEE80211_P2P_ATTR_DEVICE_INFO,
+                                   p2p_dev_addr, sizeof(p2p_dev_addr));
+       if (err < 0)
+               err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+                                           IEEE80211_P2P_ATTR_DEVICE_ID,
+                                           p2p_dev_addr, sizeof(p2p_dev_addr));
+       /* a match on our tx destination means we found the peer's channel */
+       if ((err >= 0) &&
+           (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+               afx_hdl->peer_chan = bi->ctl_ch ? bi->ctl_ch :
+                                     CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
+               brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
+                         afx_hdl->tx_dst_addr, afx_hdl->peer_chan);
+               complete(&afx_hdl->act_frm_scan);
+       }
+       return true;
+}
+
+/**
+ * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static void
+brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct net_device *ndev = cfg->escan_info.ndev;
+
+       if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
+           (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
+            test_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) {
+               brcmf_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n");
+               /* if channel is not zero, "actframe" uses off channel scan.
+                * So abort scan for off channel completion.
+                */
+               if (p2p->af_sent_channel)
+                       brcmf_notify_escan_complete(cfg, ndev, true, true);
+       } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+                           &p2p->status)) {
+               brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
+               /* So abort scan to cancel listen */
+               brcmf_notify_escan_complete(cfg, ndev, true, true);
+       }
+}
+
+
+/**
+ * brcmf_p2p_gon_req_collision() - Check for GO negotiation collision.
+ *
+ * @p2p: p2p device info struct.
+ * @mac: source (peer) address of the received GO negotiation request.
+ *
+ * return true if received action frame is to be dropped.
+ */
+static bool
+brcmf_p2p_gon_req_collision(struct brcmf_p2p_info *p2p, u8 *mac)
+{
+       struct brcmf_cfg80211_info *cfg = p2p->cfg;
+       struct brcmf_if *ifp;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       /* no collision unless we are waiting on our own pending gon req */
+       if (!test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) ||
+           !p2p->gon_req_action)
+               return false;
+
+       brcmf_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n");
+       /* if sa(peer) addr is less than da(my) addr, then this device
+        * process peer's gon request and block to send gon req.
+        * if not (sa addr > da addr),
+        * this device will process gon request and drop gon req of peer.
+        */
+       ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+       if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) {
+               brcmf_dbg(INFO, "Block transmit gon req !!!\n");
+               p2p->block_gon_req_tx = true;
+               /* if we are finding a common channel for sending af,
+                * do not scan more to block to send current gon req
+                */
+               if (test_and_clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+                                      &p2p->status))
+                       complete(&p2p->afx_hdl.act_frm_scan);
+               if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+                                      &p2p->status))
+                       brcmf_p2p_stop_wait_next_action_frame(cfg);
+               return false;
+       }
+
+       /* drop gon request of peer to process gon request by this device. */
+       brcmf_dbg(INFO, "Drop received gon req !!!\n");
+
+       return true;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_frame_rx() - received action frame.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message, containing action frame data.
+ *
+ * Return: 0 on success or if the frame is filtered out, -ENOMEM when the
+ * management frame could not be allocated.
+ */
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+                                    const struct brcmf_event_msg *e,
+                                    void *data)
+{
+       struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+       struct wireless_dev *wdev;
+       /* NOTE(review): assumes e->datalen >= sizeof(brcmf_rx_mgmt_data);
+        * confirm the fweh dispatcher guarantees this, else this underflows.
+        */
+       u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+       struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+       u8 *frame = (u8 *)(rxframe + 1);
+       struct brcmf_p2p_pub_act_frame *act_frm;
+       struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+       u16 chanspec = be16_to_cpu(rxframe->chanspec);
+       struct ieee80211_mgmt *mgmt_frame;
+       s32 freq;
+       u16 mgmt_type;
+       u8 action;
+
+       /* Check if wpa_supplicant has registered for this frame */
+       brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg);
+       mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4;
+       if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+               return 0;
+
+       brcmf_p2p_print_actframe(false, frame, mgmt_frame_len);
+
+       action = P2P_PAF_SUBTYPE_INVALID;
+       if (brcmf_p2p_is_pub_action(frame, mgmt_frame_len)) {
+               act_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+               action = act_frm->subtype;
+               /* drop a colliding GO negotiation request from the peer */
+               if ((action == P2P_PAF_GON_REQ) &&
+                   (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
+                       if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+                                    &p2p->status) &&
+                           (memcmp(afx_hdl->tx_dst_addr, e->addr,
+                                   ETH_ALEN) == 0)) {
+                               afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+                               brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
+                                         afx_hdl->peer_chan);
+                               complete(&afx_hdl->act_frm_scan);
+                       }
+                       return 0;
+               }
+               /* After complete GO Negotiation, roll back to mpc mode */
+               if ((action == P2P_PAF_GON_CONF) ||
+                   (action == P2P_PAF_PROVDIS_RSP))
+                       brcmf_set_mpc(ifp->ndev, 1);
+               if (action == P2P_PAF_GON_CONF) {
+                       brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+                       clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+               }
+       } else if (brcmf_p2p_is_gas_action(frame, mgmt_frame_len)) {
+               sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+               action = sd_act_frm->action;
+       }
+
+       /* received the response frame we were waiting for; stop waiting */
+       if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+           (p2p->next_af_subtype == action)) {
+               brcmf_dbg(TRACE, "We got a right next frame! (%d)\n", action);
+               clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+                         &p2p->status);
+               /* Stop waiting for next AF. */
+               brcmf_p2p_stop_wait_next_action_frame(cfg);
+       }
+
+       /* rebuild an 802.11 management header in front of the frame body
+        * before handing the frame to cfg80211.
+        */
+       mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) +
+                            mgmt_frame_len, GFP_KERNEL);
+       if (!mgmt_frame) {
+               brcmf_err("No memory available for action frame\n");
+               return -ENOMEM;
+       }
+       memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+       brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+                              ETH_ALEN);
+       memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+       mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+       memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
+       mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+
+       freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+                                             CHSPEC_IS2G(chanspec) ?
+                                             IEEE80211_BAND_2GHZ :
+                                             IEEE80211_BAND_5GHZ);
+       wdev = ifp->ndev->ieee80211_ptr;
+       cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+                        GFP_ATOMIC);
+
+       kfree(mgmt_frame);
+       return 0;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_tx_complete() - transmit action frame complete
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: not used.
+ *
+ * Return: always 0.
+ */
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+                                       const struct brcmf_event_msg *e,
+                                       void *data)
+{
+       struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+       brcmf_dbg(INFO, "Enter: event %s, status=%d\n",
+                 e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ?
+                 "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE",
+                 e->status);
+
+       /* ignore events when no action frame send is in progress */
+       if (!test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status))
+               return 0;
+
+       if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
+               if (e->status == BRCMF_E_STATUS_SUCCESS)
+                       set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+                               &p2p->status);
+               else {
+                       set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+                       /* If there is no ack, we don't need to wait for
+                        * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
+                        */
+                       brcmf_p2p_stop_wait_next_action_frame(cfg);
+               }
+
+       } else {
+               /* off-channel dwell finished; wake up the tx waiter */
+               complete(&p2p->send_af_done);
+       }
+       return 0;
+}
+
+
+/**
+ * brcmf_p2p_tx_action_frame() - send action frame over fil.
+ *
+ * @p2p: p2p info struct for vif.
+ * @af_params: action frame data/info.
+ *
+ * Send an action frame immediately without doing channel synchronization.
+ *
+ * This function waits for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ *
+ * Return: 0 when the frame was acked, negative error otherwise.
+ */
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+                                    struct brcmf_fil_af_params_le *af_params)
+{
+       struct brcmf_cfg80211_vif *vif;
+       s32 err = 0;
+       s32 timeout = 0;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       /* reset completion and result bits before starting the new tx */
+       INIT_COMPLETION(p2p->send_af_done);
+       clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+       clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+       err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+                                       sizeof(*af_params));
+       if (err) {
+               brcmf_err(" sending action frame has failed\n");
+               goto exit;
+       }
+
+       p2p->af_sent_channel = le32_to_cpu(af_params->channel);
+       p2p->af_tx_sent_jiffies = jiffies;
+
+       /* the timeout return value is intentionally unused: the outcome is
+        * judged solely by the ACTION_TX_COMPLETED status bit below.
+        */
+       timeout = wait_for_completion_timeout(&p2p->send_af_done,
+                                       msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME));
+
+       if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
+               brcmf_dbg(TRACE, "TX action frame operation is success\n");
+       } else {
+               err = -EIO;
+               brcmf_dbg(TRACE, "TX action frame operation has failed\n");
+       }
+       /* clear status bit for action tx */
+       clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+       clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+exit:
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_pub_af_tx() - public action frame tx routine.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @af_params: action frame data/info.
+ * @config_af_params: configuration data for action frame.
+ *
+ * routine which transmits action frame public type. Sets dwell time,
+ * channel-search and mpc behaviour per P2P public action frame subtype.
+ *
+ * Return: 0 on success, -EINVAL for an unknown subtype.
+ */
+static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
+                              struct brcmf_fil_af_params_le *af_params,
+                              struct brcmf_config_af_params *config_af_params)
+{
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct brcmf_fil_action_frame_le *action_frame;
+       struct brcmf_p2p_pub_act_frame *act_frm;
+       s32 err = 0;
+       u16 ie_len;
+
+       action_frame = &af_params->action_frame;
+       act_frm = (struct brcmf_p2p_pub_act_frame *)(action_frame->data);
+
+       config_af_params->extra_listen = true;
+
+       switch (act_frm->subtype) {
+       case P2P_PAF_GON_REQ:
+               brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n");
+               set_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+               config_af_params->mpc_onoff = 0;
+               config_af_params->search_channel = true;
+               /* expect the RSP subtype next (subtype + 1) */
+               p2p->next_af_subtype = act_frm->subtype + 1;
+               p2p->gon_req_action = true;
+               /* increase dwell time to wait for RESP frame */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+               break;
+       case P2P_PAF_GON_RSP:
+               p2p->next_af_subtype = act_frm->subtype + 1;
+               /* increase dwell time to wait for CONF frame */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+               break;
+       case P2P_PAF_GON_CONF:
+               /* If we reached till GO Neg confirmation reset the filter */
+               brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+               clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+               /* turn on mpc again if go nego is done */
+               config_af_params->mpc_onoff = 1;
+               /* minimize dwell time */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+               config_af_params->extra_listen = false;
+               break;
+       case P2P_PAF_INVITE_REQ:
+               config_af_params->search_channel = true;
+               p2p->next_af_subtype = act_frm->subtype + 1;
+               /* increase dwell time */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+               break;
+       case P2P_PAF_INVITE_RSP:
+               /* minimize dwell time */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+               config_af_params->extra_listen = false;
+               break;
+       case P2P_PAF_DEVDIS_REQ:
+               config_af_params->search_channel = true;
+               p2p->next_af_subtype = act_frm->subtype + 1;
+               /* maximize dwell time to wait for RESP frame */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME);
+               break;
+       case P2P_PAF_DEVDIS_RSP:
+               /* minimize dwell time */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+               config_af_params->extra_listen = false;
+               break;
+       case P2P_PAF_PROVDIS_REQ:
+               /* only search for a channel when the frame carries no
+                * P2P Group ID attribute
+                */
+               ie_len = le16_to_cpu(action_frame->len) -
+                        offsetof(struct brcmf_p2p_pub_act_frame, elts);
+               if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len,
+                                         IEEE80211_P2P_ATTR_GROUP_ID,
+                                         NULL, 0) < 0)
+                       config_af_params->search_channel = true;
+               config_af_params->mpc_onoff = 0;
+               p2p->next_af_subtype = act_frm->subtype + 1;
+               /* increase dwell time to wait for RESP frame */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+               break;
+       case P2P_PAF_PROVDIS_RSP:
+               /* wpa_supplicant send go nego req right after prov disc */
+               p2p->next_af_subtype = P2P_PAF_GON_REQ;
+               /* increase dwell time to MED level */
+               af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+               config_af_params->extra_listen = false;
+               break;
+       default:
+               brcmf_err("Unknown p2p pub act frame subtype: %d\n",
+                         act_frm->subtype);
+               err = -EINVAL;
+       }
+       return err;
+}
+
+/**
+ * brcmf_p2p_send_action_frame() - send action frame.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device to transmit on.
+ * @af_params: configuration data for action frame.
+ *
+ * Return: true when the frame was acked (or tx was deliberately blocked
+ * due to a GO negotiation collision), false otherwise.
+ */
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+                                struct net_device *ndev,
+                                struct brcmf_fil_af_params_le *af_params)
+{
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct brcmf_fil_action_frame_le *action_frame;
+       struct brcmf_config_af_params config_af_params;
+       struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+       u16 action_frame_len;
+       bool ack = false;
+       u8 category;
+       u8 action;
+       s32 tx_retry;
+       s32 extra_listen_time;
+       uint delta_ms;
+
+       action_frame = &af_params->action_frame;
+       action_frame_len = le16_to_cpu(action_frame->len);
+
+       brcmf_p2p_print_actframe(true, action_frame->data, action_frame_len);
+
+       /* Add the default dwell time. Dwell time to stay off-channel */
+       /* to wait for a response action frame after transmitting an  */
+       /* GO Negotiation action frame                                */
+       af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME);
+
+       category = action_frame->data[DOT11_ACTION_CAT_OFF];
+       action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+       /* initialize variables */
+       p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+       p2p->gon_req_action = false;
+
+       /* config parameters */
+       config_af_params.mpc_onoff = -1;
+       config_af_params.search_channel = false;
+       config_af_params.extra_listen = false;
+
+       if (brcmf_p2p_is_pub_action(action_frame->data, action_frame_len)) {
+               /* p2p public action frame process */
+               if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) {
+                       /* Just send unknown subtype frame with */
+                       /* default parameters.                  */
+                       brcmf_err("P2P Public action frame, unknown subtype.\n");
+               }
+       } else if (brcmf_p2p_is_gas_action(action_frame->data,
+                                          action_frame_len)) {
+               /* service discovery process */
+               if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+                   action == P2PSD_ACTION_ID_GAS_CREQ) {
+                       /* configure service discovery query frame */
+                       config_af_params.search_channel = true;
+
+                       /* save next af subtype to cancel */
+                       /* remaining dwell time           */
+                       p2p->next_af_subtype = action + 1;
+
+                       af_params->dwell_time =
+                               cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+               } else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+                          action == P2PSD_ACTION_ID_GAS_CRESP) {
+                       /* configure service discovery response frame */
+                       af_params->dwell_time =
+                               cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+               } else {
+                       brcmf_err("Unknown action type: %d\n", action);
+                       goto exit;
+               }
+       } else if (brcmf_p2p_is_p2p_action(action_frame->data,
+                                          action_frame_len)) {
+               /* do not configure anything. it will be */
+               /* sent with a default configuration     */
+       } else {
+               brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n",
+                         category, action);
+               return false;
+       }
+
+       /* if connecting on primary iface, sleep for a while before sending
+        * af tx for VSDB
+        */
+       if (test_bit(BRCMF_VIF_STATUS_CONNECTING,
+                    &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state))
+               msleep(50);
+
+       /* if scan is ongoing, abort current scan. */
+       if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+               brcmf_abort_scanning(cfg);
+
+       memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN);
+
+       /* To make sure to send successfully action frame, turn off mpc */
+       if (config_af_params.mpc_onoff == 0)
+               brcmf_set_mpc(ndev, 0);
+
+       /* set status and destination address before sending af */
+       if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+               /* set status to cancel the remaining dwell time in rx process */
+               set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+       }
+
+       p2p->af_sent_channel = 0;
+       set_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+       /* validate channel and p2p ies */
+       if (config_af_params.search_channel &&
+           IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) &&
+           p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) {
+               afx_hdl = &p2p->afx_hdl;
+               afx_hdl->peer_listen_chan = le32_to_cpu(af_params->channel);
+
+               if (brcmf_p2p_af_searching_channel(p2p) ==
+                                                       P2P_INVALID_CHANNEL) {
+                       brcmf_err("Couldn't find peer's channel.\n");
+                       goto exit;
+               }
+
+               /* Abort scan even for VSDB scenarios. Scan gets aborted in
+                * firmware but after the check of piggyback algorithm. To take
+                * care of current piggyback algo, lets abort the scan here
+                * itself.
+                */
+               brcmf_notify_escan_complete(cfg, ndev, true, true);
+
+               /* update channel */
+               af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
+       }
+
+       /* retry tx until acked, blocked by a gon req collision, or the
+        * retry limit is reached
+        */
+       tx_retry = 0;
+       while (!p2p->block_gon_req_tx &&
+              (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+               ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+               tx_retry++;
+       }
+       if (ack == false) {
+               brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry);
+               clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+       }
+
+exit:
+       clear_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+
+       /* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
+        * if we couldn't get the next action response frame and dongle does
+        * not keep the dwell time, go to listen state again to get next action
+        * response frame.
+        */
+       if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx &&
+           test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+           p2p->af_sent_channel == afx_hdl->my_listen_chan) {
+               delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies);
+               if (le32_to_cpu(af_params->dwell_time) > delta_ms)
+                       extra_listen_time = le32_to_cpu(af_params->dwell_time) -
+                                           delta_ms;
+               else
+                       extra_listen_time = 0;
+               if (extra_listen_time > 50) {
+                       set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+                               &p2p->status);
+                       brcmf_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n",
+                                 le32_to_cpu(af_params->dwell_time),
+                                 extra_listen_time);
+                       /* NOTE(review): 100ms is added twice below (before
+                        * discover_listen and before computing the wait
+                        * duration) - confirm the double margin is intended.
+                        */
+                       extra_listen_time += 100;
+                       if (!brcmf_p2p_discover_listen(p2p,
+                                                      p2p->af_sent_channel,
+                                                      extra_listen_time)) {
+                               unsigned long duration;
+
+                               extra_listen_time += 100;
+                               duration = msecs_to_jiffies(extra_listen_time);
+                               wait_for_completion_timeout(&p2p->wait_next_af,
+                                                           duration);
+                       }
+                       clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+                                 &p2p->status);
+               }
+       }
+
+       if (p2p->block_gon_req_tx) {
+               /* if ack is true, supplicant will wait more time(100ms).
+                * so we will return it as a success to get more time .
+                */
+               p2p->block_gon_req_tx = false;
+               ack = true;
+       }
+
+       clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+       /* if all done, turn mpc on again */
+       if (config_af_params.mpc_onoff == 1)
+               brcmf_set_mpc(ndev, 1);
+
+       return ack;
+}
+
+/**
+ * brcmf_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req.
+ *
+ * @ifp: interface pointer for which event was received.
+ * @e: event message.
+ * @data: payload of event message (probe request).
+ *
+ * Return: always 0.
+ */
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+                                         const struct brcmf_event_msg *e,
+                                         void *data)
+{
+       struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+       struct wireless_dev *wdev;
+       struct brcmf_cfg80211_vif *vif = ifp->vif;
+       struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+       u16 chanspec = be16_to_cpu(rxframe->chanspec);
+       u8 *mgmt_frame;
+       u32 mgmt_frame_len;
+       s32 freq;
+       u16 mgmt_type;
+
+       brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
+                 e->reason);
+
+       /* a probe request from the peer reveals its channel during the
+        * common-channel search
+        */
+       if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
+           (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+               afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+               brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
+                         afx_hdl->peer_chan);
+               complete(&afx_hdl->act_frm_scan);
+       }
+
+       /* Firmware sends us two proberesponses for each idx one. At the */
+       /* moment anything but bsscfgidx 0 is passed up to supplicant    */
+       if (e->bsscfgidx == 0)
+               return 0;
+
+       /* Filter any P2P probe reqs arriving during the GO-NEG Phase */
+       if (test_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) {
+               brcmf_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n");
+               return 0;
+       }
+
+       /* Check if wpa_supplicant has registered for this frame */
+       brcmf_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg);
+       mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4;
+       if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+               return 0;
+
+       /* frame body follows directly after the rx mgmt header */
+       mgmt_frame = (u8 *)(rxframe + 1);
+       mgmt_frame_len = e->datalen - sizeof(*rxframe);
+       freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+                                             CHSPEC_IS2G(chanspec) ?
+                                             IEEE80211_BAND_2GHZ :
+                                             IEEE80211_BAND_5GHZ);
+       wdev = ifp->ndev->ieee80211_ptr;
+       cfg80211_rx_mgmt(wdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+       brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
+                 mgmt_frame_len, e->datalen, chanspec, freq);
+
+       return 0;
+}
+
+
+/**
+ * brcmf_p2p_attach() - attach for P2P.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
+{
+       struct brcmf_if *pri_ifp;
+       struct brcmf_if *p2p_ifp;
+       struct brcmf_cfg80211_vif *p2p_vif;
+       struct brcmf_p2p_info *p2p;
+       struct brcmf_pub *drvr;
+       s32 bssidx;
+       s32 err = 0;
+
+       p2p = &cfg->p2p;
+       p2p->cfg = cfg;
+
+       drvr = cfg->pub;
+
+       /* interface 0 is the primary, interface 1 (if present) is p2p */
+       pri_ifp = drvr->iflist[0];
+       p2p_ifp = drvr->iflist[1];
+
+       p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
+       if (p2p_ifp) {
+               p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
+                                         false);
+               if (IS_ERR(p2p_vif)) {
+                       brcmf_err("could not create discovery vif\n");
+                       /* NOTE(review): error is folded to -ENOMEM rather
+                        * than PTR_ERR(p2p_vif) - confirm this is intended.
+                        */
+                       err = -ENOMEM;
+                       goto exit;
+               }
+
+               p2p_vif->ifp = p2p_ifp;
+               p2p_ifp->vif = p2p_vif;
+               p2p_vif->wdev.netdev = p2p_ifp->ndev;
+               p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
+               SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
+
+               p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
+
+               brcmf_p2p_generate_bss_mac(p2p);
+               brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
+
+               /* Initialize P2P Discovery in the firmware */
+               err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
+               if (err < 0) {
+                       brcmf_err("set p2p_disc error\n");
+                       brcmf_free_vif(p2p_vif);
+                       goto exit;
+               }
+               /* obtain bsscfg index for P2P discovery */
+               err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
+               if (err < 0) {
+                       brcmf_err("retrieving discover bsscfg index failed\n");
+                       brcmf_free_vif(p2p_vif);
+                       goto exit;
+               }
+               /* Verify that firmware uses same bssidx as driver !! */
+               if (p2p_ifp->bssidx != bssidx) {
+                       brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
+                                 bssidx, p2p_ifp->bssidx);
+                       brcmf_free_vif(p2p_vif);
+                       goto exit;
+               }
+
+               /* set up completions/work used by the action frame path */
+               init_completion(&p2p->send_af_done);
+               INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+               init_completion(&p2p->afx_hdl.act_frm_scan);
+               init_completion(&p2p->wait_next_af);
+       }
+exit:
+       return err;
+}
+
+
+/**
+ * brcmf_p2p_detach() - detach P2P.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Tears down the discovery interface (if any) and clears all P2P state.
+ */
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
+{
+       struct brcmf_cfg80211_vif *vif;
+
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+       if (vif != NULL) {
+               brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+               brcmf_p2p_deinit_discovery(p2p);
+               /* remove discovery interface */
+               brcmf_free_vif(vif);
+               p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+       }
+       /* just set it all to zero */
+       memset(p2p, 0, sizeof(*p2p));
+}
+
+/**
+ * brcmf_p2p_get_current_chanspec() - Get current operation channel.
+ *
+ * @p2p: P2P specific data.
+ * @chanspec: chanspec to be returned.
+ *
+ * Queries the firmware for the current channel of the primary interface;
+ * falls back to channel 11 when the query fails.
+ */
+static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
+                                          u16 *chanspec)
+{
+       struct brcmf_if *ifp;
+       struct brcmf_fil_chan_info_le ci;
+       s32 err;
+
+       ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+
+       /* default to channel 11 in case the firmware query below fails */
+       *chanspec = 11 & WL_CHANSPEC_CHAN_MASK;
+
+       err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci));
+       if (!err) {
+               *chanspec = le32_to_cpu(ci.hw_channel) & WL_CHANSPEC_CHAN_MASK;
+               if (*chanspec < CH_MAX_2G_CHANNEL)
+                       *chanspec |= WL_CHANSPEC_BAND_2G;
+               else
+                       *chanspec |= WL_CHANSPEC_BAND_5G;
+       }
+       *chanspec |= WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+}
+
+/**
+ * brcmf_p2p_ifchange() - change a P2P interface role.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: new firmware interface type for the connection bsscfg.
+ *
+ * Returns 0 if success.
+ */
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+                      enum brcmf_fil_p2p_if_types if_type)
+{
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct brcmf_cfg80211_vif *vif;
+       struct brcmf_fil_p2p_if_le if_request;
+       s32 err;
+       u16 chanspec;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+       if (!vif) {
+               brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
+               return -EPERM;
+       }
+       brcmf_notify_escan_complete(cfg, vif->ifp->ndev, true, true);
+       vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+       if (!vif) {
+               brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
+               return -EPERM;
+       }
+       brcmf_set_mpc(vif->ifp->ndev, 0);
+
+       /* In concurrency case, STA may be already associated in a particular */
+       /* channel. so retrieve the current channel of primary interface and  */
+       /* then start the virtual interface on that.                          */
+       brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+       if_request.type = cpu_to_le16((u16)if_type);
+       if_request.chspec = cpu_to_le16(chanspec);
+       memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+
+       /* arm the vif event handler before asking firmware to update */
+       brcmf_cfg80211_arm_vif_event(cfg, vif);
+       err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
+                                      sizeof(if_request));
+       if (err) {
+               brcmf_err("p2p_ifupd FAILED, err=%d\n", err);
+               brcmf_cfg80211_arm_vif_event(cfg, NULL);
+               return err;
+       }
+       err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE,
+                                                   msecs_to_jiffies(1500));
+       brcmf_cfg80211_arm_vif_event(cfg, NULL);
+       if (!err)  {
+               brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
+               return -EIO;
+       }
+
+       err = brcmf_fil_cmd_int_set(vif->ifp, BRCMF_C_SET_SCB_TIMEOUT,
+                                   BRCMF_SCB_TIMEOUT_VALUE);
+
+       return err;
+}
+
+static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
+                                   struct brcmf_if *ifp, u8 ea[ETH_ALEN],
+                                   enum brcmf_fil_p2p_if_types iftype)
+{
+       struct brcmf_fil_p2p_if_le if_request;
+       int err;
+       u16 chanspec;
+
+       /* we need a default channel */
+       brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+       /* fill the firmware request */
+       memcpy(if_request.addr, ea, ETH_ALEN);
+       if_request.type = cpu_to_le16((u16)iftype);
+       if_request.chspec = cpu_to_le16(chanspec);
+
+       err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
+                                      sizeof(if_request));
+       if (err)
+               return err;
+
+       return err;
+}
+
+static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+       struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+       struct net_device *pri_ndev = cfg_to_ndev(cfg);
+       struct brcmf_if *ifp = netdev_priv(pri_ndev);
+       u8 *addr = vif->wdev.netdev->dev_addr;
+
+       return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN);
+}
+
+static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+       struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+       struct net_device *pri_ndev = cfg_to_ndev(cfg);
+       struct brcmf_if *ifp = netdev_priv(pri_ndev);
+       u8 *addr = vif->wdev.netdev->dev_addr;
+
+       return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN);
+}
+
+/**
+ * brcmf_p2p_add_vif() - create a new P2P virtual interface.
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @type: nl80211 interface type.
+ * @flags: TBD
+ * @params: TBD
+ */
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+                                      enum nl80211_iftype type, u32 *flags,
+                                      struct vif_params *params)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+       struct brcmf_cfg80211_vif *vif;
+       enum brcmf_fil_p2p_if_types iftype;
+       enum wl_mode mode;
+       int err;
+
+       if (brcmf_cfg80211_vif_event_armed(cfg))
+               return ERR_PTR(-EBUSY);
+
+       brcmf_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type);
+
+       switch (type) {
+       case NL80211_IFTYPE_P2P_CLIENT:
+               iftype = BRCMF_FIL_P2P_IF_CLIENT;
+               mode = WL_MODE_BSS;
+               break;
+       case NL80211_IFTYPE_P2P_GO:
+               iftype = BRCMF_FIL_P2P_IF_GO;
+               mode = WL_MODE_AP;
+               break;
+       default:
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
+       vif = brcmf_alloc_vif(cfg, type, false);
+       if (IS_ERR(vif))
+               return (struct wireless_dev *)vif;
+       brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+       err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
+                                      iftype);
+       if (err) {
+               brcmf_cfg80211_arm_vif_event(cfg, NULL);
+               goto fail;
+       }
+
+       /* wait for firmware event */
+       err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+                                                   msecs_to_jiffies(1500));
+       brcmf_cfg80211_arm_vif_event(cfg, NULL);
+       if (!err) {
+               brcmf_err("timeout occurred\n");
+               err = -EIO;
+               goto fail;
+       }
+
+       /* interface created in firmware */
+       ifp = vif->ifp;
+       if (!ifp) {
+               brcmf_err("no if pointer provided\n");
+               err = -ENOENT;
+               goto fail;
+       }
+
+       strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+       err = brcmf_net_attach(ifp, true);
+       if (err) {
+               brcmf_err("Registering netdevice failed\n");
+               goto fail;
+       }
+       cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+       /* Disable firmware roaming for P2P interface  */
+       brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+       if (iftype == BRCMF_FIL_P2P_IF_GO) {
+               /* set station timeout for p2p */
+               brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCB_TIMEOUT,
+                                     BRCMF_SCB_TIMEOUT_VALUE);
+       }
+       return &ifp->vif->wdev;
+
+fail:
+       brcmf_free_vif(vif);
+       return ERR_PTR(err);
+}
+
+/**
+ * brcmf_p2p_del_vif() - delete a P2P virtual interface.
+ *
+ * @wiphy: wiphy device of interface.
+ * @wdev: wireless device of interface.
+ *
+ * TODO: not yet supported.
+ */
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+       struct brcmf_p2p_info *p2p = &cfg->p2p;
+       struct brcmf_cfg80211_vif *vif;
+       unsigned long jiffie_timeout = msecs_to_jiffies(1500);
+       bool wait_for_disable = false;
+       int err;
+
+       brcmf_dbg(TRACE, "delete P2P vif\n");
+       vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+       switch (vif->wdev.iftype) {
+       case NL80211_IFTYPE_P2P_CLIENT:
+               if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
+                       wait_for_disable = true;
+               break;
+
+       case NL80211_IFTYPE_P2P_GO:
+               if (!brcmf_p2p_disable_p2p_if(vif))
+                       wait_for_disable = true;
+               break;
+
+       case NL80211_IFTYPE_P2P_DEVICE:
+       default:
+               return -ENOTSUPP;
+               break;
+       }
+
+       clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+       brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+       if (wait_for_disable)
+               wait_for_completion_timeout(&cfg->vif_disabled,
+                                           msecs_to_jiffies(500));
+
+       brcmf_vif_clear_mgmt_ies(vif);
+
+       brcmf_cfg80211_arm_vif_event(cfg, vif);
+       err = brcmf_p2p_release_p2p_if(vif);
+       if (!err) {
+               /* wait for firmware event */
+               err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
+                                                           jiffie_timeout);
+               if (!err)
+                       err = -EIO;
+               else
+                       err = 0;
+       }
+       brcmf_cfg80211_arm_vif_event(cfg, NULL);
+       brcmf_free_vif(vif);
+       p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+
+       return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
new file mode 100644 (file)
index 0000000..6821b26
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef WL_CFGP2P_H_
+#define WL_CFGP2P_H_
+
+#include <net/cfg80211.h>
+
+struct brcmf_cfg80211_info;
+
+/**
+ * enum p2p_bss_type - different type of BSS configurations.
+ *
+ * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
+ * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_MAX: used for range checking.
+ */
+enum p2p_bss_type {
+       P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+       P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+       P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+       P2PAPI_BSSCFG_MAX
+};
+
+/**
+ * struct p2p_bss - peer-to-peer bss related information.
+ *
+ * @vif: virtual interface of this P2P bss.
+ * @private_data: TBD
+ */
+struct p2p_bss {
+       struct brcmf_cfg80211_vif *vif;
+       void *private_data;
+};
+
+/**
+ * enum brcmf_p2p_status - P2P specific dongle status.
+ *
+ * @BRCMF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_DEL: NOT-USED?
+ * @BRCMF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle.
+ * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed.
+ * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked.
+ * @BRCMF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing.
+ * @BRCMF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel.
+ * @BRCMF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response.
+ * @BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active.
+ */
+enum brcmf_p2p_status {
+       BRCMF_P2P_STATUS_ENABLED,
+       BRCMF_P2P_STATUS_IF_ADD,
+       BRCMF_P2P_STATUS_IF_DEL,
+       BRCMF_P2P_STATUS_IF_DELETING,
+       BRCMF_P2P_STATUS_IF_CHANGING,
+       BRCMF_P2P_STATUS_IF_CHANGED,
+       BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+       BRCMF_P2P_STATUS_ACTION_TX_NOACK,
+       BRCMF_P2P_STATUS_GO_NEG_PHASE,
+       BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+       BRCMF_P2P_STATUS_SENDING_ACT_FRAME,
+       BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+       BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+       BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL
+};
+
+/**
+ * struct afx_hdl - action frame off channel storage.
+ *
+ * @afx_work: worker thread for searching channel
+ * @act_frm_scan: thread synchronizing struct.
+ * @is_active: channel searching active.
+ * @peer_chan: current channel.
+ * @is_listen: sets mode for afx worker.
+ * @my_listen_chan: this peers listen channel.
+ * @peer_listen_chan: remote peers listen channel.
+ * @tx_dst_addr: mac address where tx af should be sent to.
+ */
+struct afx_hdl {
+       struct work_struct afx_work;
+       struct completion act_frm_scan;
+       bool is_active;
+       s32 peer_chan;
+       bool is_listen;
+       u16 my_listen_chan;
+       u16 peer_listen_chan;
+       u8 tx_dst_addr[ETH_ALEN];
+};
+
+/**
+ * struct brcmf_p2p_info - p2p specific driver information.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @status: status of P2P (see enum brcmf_p2p_status).
+ * @dev_addr: P2P device address.
+ * @int_addr: P2P interface address.
+ * @bss_idx: information for P2P bss types.
+ * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @ssid: ssid for P2P GO.
+ * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @remain_on_channel: contains copy of struct used by cfg80211.
+ * @remain_on_channel_cookie: cookie counter for remain on channel cmd
+ * @next_af_subtype: expected action frame subtype.
+ * @send_af_done: indication that action frame tx is complete.
+ * @afx_hdl: action frame search handler info.
+ * @af_sent_channel: channel action frame is sent.
+ * @af_tx_sent_jiffies: jiffies time when af tx was transmitted.
+ * @wait_next_af: thread synchronizing struct.
+ * @gon_req_action: about to send go negotiation request frame.
+ * @block_gon_req_tx: drop tx go negotiation request frame.
+ */
+struct brcmf_p2p_info {
+       struct brcmf_cfg80211_info *cfg;
+       unsigned long status;
+       u8 dev_addr[ETH_ALEN];
+       u8 int_addr[ETH_ALEN];
+       struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+       struct timer_list listen_timer;
+       struct brcmf_ssid ssid;
+       u8 listen_channel;
+       struct ieee80211_channel remain_on_channel;
+       u32 remain_on_channel_cookie;
+       u8 next_af_subtype;
+       struct completion send_af_done;
+       struct afx_hdl afx_hdl;
+       u32 af_sent_channel;
+       unsigned long af_tx_sent_jiffies;
+       struct completion wait_next_af;
+       bool gon_req_action;
+       bool block_gon_req_tx;
+};
+
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+                                      enum nl80211_iftype type, u32 *flags,
+                                      struct vif_params *params);
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+                      enum brcmf_fil_p2p_if_types if_type);
+int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+                       struct cfg80211_scan_request *request,
+                       struct brcmf_cfg80211_vif *vif);
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+                               struct ieee80211_channel *channel,
+                               unsigned int duration, u64 *cookie);
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+                                    const struct brcmf_event_msg *e,
+                                    void *data);
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp);
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+                                    const struct brcmf_event_msg *e,
+                                    void *data);
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+                                       const struct brcmf_event_msg *e,
+                                       void *data);
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+                                struct net_device *ndev,
+                                struct brcmf_fil_af_params_le *af_params);
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+                                          struct brcmf_bss_info_le *bi);
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+                                         const struct brcmf_event_msg *e,
+                                         void *data);
+#endif /* WL_CFGP2P_H_ */
index e15630c..42289e9 100644 (file)
@@ -354,11 +354,10 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
        int i;
        struct brcmf_usbreq *req, *reqs;
 
-       reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
-       if (reqs == NULL) {
-               brcmf_err("fail to allocate memory!\n");
+       reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
+       if (reqs == NULL)
                return NULL;
-       }
+
        req = reqs;
 
        for (i = 0; i < qsize; i++) {
@@ -421,10 +420,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
        brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
                  req->skb);
        brcmf_usb_del_fromq(devinfo, req);
-       if (urb->status == 0)
-               devinfo->bus_pub.bus->dstats.tx_packets++;
-       else
-               devinfo->bus_pub.bus->dstats.tx_errors++;
 
        brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
 
@@ -451,10 +446,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
        req->skb = NULL;
 
        /* zero lenght packets indicate usb "failure". Do not refill */
-       if (urb->status == 0 && urb->actual_length) {
-               devinfo->bus_pub.bus->dstats.rx_packets++;
-       } else {
-               devinfo->bus_pub.bus->dstats.rx_errors++;
+       if (urb->status != 0 || !urb->actual_length) {
                brcmu_pkt_buf_free_skb(skb);
                brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
                return;
@@ -1257,6 +1249,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
        bus->bus_priv.usb = bus_pub;
        dev_set_drvdata(dev, bus);
        bus->ops = &brcmf_usb_bus_ops;
+       bus->chip = bus_pub->devid;
+       bus->chiprev = bus_pub->chiprev;
 
        /* Attach to the common driver interface */
        ret = brcmf_attach(0, dev);
index 62a528e..cecc3ef 100644 (file)
@@ -26,6 +26,8 @@
 #include <brcmu_wifi.h>
 #include "dhd.h"
 #include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
 #include "wl_cfg80211.h"
 #include "fwil.h"
 
 #define BRCMF_PNO_SCAN_COMPLETE                1
 #define BRCMF_PNO_SCAN_INCOMPLETE      0
 
-#define BRCMF_IFACE_MAX_CNT            2
+#define BRCMF_IFACE_MAX_CNT            3
 
-#define TLV_LEN_OFF                    1       /* length offset */
-#define TLV_HDR_LEN                    2       /* header length */
-#define TLV_BODY_OFF                   2       /* body offset */
-#define TLV_OUI_LEN                    3       /* oui id length */
 #define WPA_OUI                                "\x00\x50\xF2"  /* WPA OUI */
 #define WPA_OUI_TYPE                   1
 #define RSN_OUI                                "\x00\x0F\xAC"  /* RSN OUI */
 #define        WME_OUI_TYPE                    2
+#define WPS_OUI_TYPE                   4
 
 #define VS_IE_FIXED_HDR_LEN            6
 #define WPA_IE_VERSION_LEN             2
 #define VNDR_IE_PKTFLAG_OFFSET         8
 #define VNDR_IE_VSIE_OFFSET            12
 #define VNDR_IE_HDR_SIZE               12
-#define VNDR_IE_BEACON_FLAG            0x1
-#define VNDR_IE_PRBRSP_FLAG            0x2
-#define MAX_VNDR_IE_NUMBER             5
+#define VNDR_IE_PARSE_LIMIT            5
 
 #define        DOT11_MGMT_HDR_LEN              24      /* d11 management header len */
 #define        DOT11_BCN_PRB_FIXED_LEN         12      /* beacon/probe fixed length */
 
+#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS   320
+#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS  400
+#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS      20
+
 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
        (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
 
@@ -271,13 +272,6 @@ static const u32 __wl_cipher_suites[] = {
        WLAN_CIPHER_SUITE_AES_CMAC,
 };
 
-/* tag_ID/length/value_buffer tuple */
-struct brcmf_tlv {
-       u8 id;
-       u8 len;
-       u8 data[1];
-};
-
 /* Vendor specific ie. id = 221, oui and type defines exact ie */
 struct brcmf_vs_tlv {
        u8 id;
@@ -294,7 +288,7 @@ struct parsed_vndr_ie_info {
 
 struct parsed_vndr_ies {
        u32 count;
-       struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+       struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
 };
 
 /* Quarter dBm units to mW
@@ -381,7 +375,7 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
        return qdbm;
 }
 
-static u16 channel_to_chanspec(struct ieee80211_channel *ch)
+u16 channel_to_chanspec(struct ieee80211_channel *ch)
 {
        u16 chanspec;
 
@@ -393,19 +387,92 @@ static u16 channel_to_chanspec(struct ieee80211_channel *ch)
        else
                chanspec |= WL_CHANSPEC_BAND_5G;
 
-       if (ch->flags & IEEE80211_CHAN_NO_HT40) {
-               chanspec |= WL_CHANSPEC_BW_20;
-               chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-       } else {
-               chanspec |= WL_CHANSPEC_BW_40;
-               if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
-                       chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
-               else
-                       chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
-       }
+       chanspec |= WL_CHANSPEC_BW_20;
+       chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
        return chanspec;
 }
 
+/* Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+{
+       struct brcmf_tlv *elt;
+       int totlen;
+
+       elt = (struct brcmf_tlv *)buf;
+       totlen = buflen;
+
+       /* find tagged parameter */
+       while (totlen >= TLV_HDR_LEN) {
+               int len = elt->len;
+
+               /* validate remaining totlen */
+               if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
+                       return elt;
+
+               elt = (struct brcmf_tlv *)((u8 *)elt + (len + TLV_HDR_LEN));
+               totlen -= (len + TLV_HDR_LEN);
+       }
+
+       return NULL;
+}
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+                u8 *oui, u32 oui_len, u8 type)
+{
+       /* If the contents match the OUI and the type */
+       if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+           !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+           type == ie[TLV_BODY_OFF + oui_len]) {
+               return true;
+       }
+
+       if (tlvs == NULL)
+               return false;
+       /* point to the next ie */
+       ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+       /* calculate the length of the rest of the buffer */
+       *tlvs_len -= (int)(ie - *tlvs);
+       /* update the pointer to the start of the buffer */
+       *tlvs = ie;
+
+       return false;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+       struct brcmf_tlv *ie;
+
+       while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+               if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+                                    WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+                       return (struct brcmf_vs_tlv *)ie;
+       }
+       return NULL;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpsie(u8 *parse, u32 len)
+{
+       struct brcmf_tlv *ie;
+
+       while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+               if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+                                    WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE))
+                       return (struct brcmf_vs_tlv *)ie;
+       }
+       return NULL;
+}
+
+
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
                                 struct brcmf_wsec_key_le *key_le)
 {
@@ -438,11 +505,153 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
        return err;
 }
 
+static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+                                                    const char *name,
+                                                    enum nl80211_iftype type,
+                                                    u32 *flags,
+                                                    struct vif_params *params)
+{
+       brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+       switch (type) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_MESH_POINT:
+               return ERR_PTR(-EOPNOTSUPP);
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_P2P_DEVICE:
+       default:
+               return ERR_PTR(-EINVAL);
+       }
+}
+
+void brcmf_set_mpc(struct net_device *ndev, int mpc)
+{
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       s32 err = 0;
+
+       if (check_vif_up(ifp->vif)) {
+               err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
+               if (err) {
+                       brcmf_err("fail to set mpc\n");
+                       return;
+               }
+               brcmf_dbg(INFO, "MPC : %d\n", mpc);
+       }
+}
+
+s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+                           struct net_device *ndev,
+                           bool aborted, bool fw_abort)
+{
+       struct brcmf_scan_params_le params_le;
+       struct cfg80211_scan_request *scan_request;
+       s32 err = 0;
+
+       brcmf_dbg(SCAN, "Enter\n");
+
+       /* clear scan request, because the FW abort can cause a second call */
+       /* to this function and might cause a double cfg80211_scan_done     */
+       scan_request = cfg->scan_request;
+       cfg->scan_request = NULL;
+
+       if (timer_pending(&cfg->escan_timeout))
+               del_timer_sync(&cfg->escan_timeout);
+
+       if (fw_abort) {
+               /* Do a scan abort to stop the driver's scan engine */
+               brcmf_dbg(SCAN, "ABORT scan in firmware\n");
+               memset(&params_le, 0, sizeof(params_le));
+               memset(params_le.bssid, 0xFF, ETH_ALEN);
+               params_le.bss_type = DOT11_BSSTYPE_ANY;
+               params_le.scan_type = 0;
+               params_le.channel_num = cpu_to_le32(1);
+               params_le.nprobes = cpu_to_le32(1);
+               params_le.active_time = cpu_to_le32(-1);
+               params_le.passive_time = cpu_to_le32(-1);
+               params_le.home_time = cpu_to_le32(-1);
+               /* Scan is aborted by setting channel_list[0] to -1 */
+               params_le.channel_list[0] = cpu_to_le16(-1);
+               /* E-Scan (or anyother type) can be aborted by SCAN */
+               err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+                                            &params_le, sizeof(params_le));
+               if (err)
+                       brcmf_err("Scan abort  failed\n");
+       }
+       /*
+        * e-scan can be initiated by scheduled scan
+        * which takes precedence.
+        */
+       if (cfg->sched_escan) {
+               brcmf_dbg(SCAN, "scheduled scan completed\n");
+               cfg->sched_escan = false;
+               if (!aborted)
+                       cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+               brcmf_set_mpc(ndev, 1);
+       } else if (scan_request) {
+               brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+                         aborted ? "Aborted" : "Done");
+               cfg80211_scan_done(scan_request, aborted);
+               brcmf_set_mpc(ndev, 1);
+       }
+       if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+               brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+
+       return err;
+}
+
+static
+int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+       struct net_device *ndev = wdev->netdev;
+
+       /* vif event pending in firmware */
+       if (brcmf_cfg80211_vif_event_armed(cfg))
+               return -EBUSY;
+
+       if (ndev) {
+               if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
+                   cfg->escan_info.ndev == ndev)
+                       brcmf_notify_escan_complete(cfg, ndev, true,
+                                                   true);
+
+               brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
+       }
+
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_MESH_POINT:
+               return -EOPNOTSUPP;
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               return brcmf_p2p_del_vif(wiphy, wdev);
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_P2P_DEVICE:
+       default:
+               return -EINVAL;
+       }
+       return -EOPNOTSUPP;
+}
+
 static s32
 brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                         enum nl80211_iftype type, u32 *flags,
                         struct vif_params *params)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_cfg80211_vif *vif = ifp->vif;
        s32 infra = 0;
@@ -462,10 +671,23 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                infra = 0;
                break;
        case NL80211_IFTYPE_STATION:
+               /* Ignore change for p2p IF. Unclear why supplicant does this */
+               if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
+                   (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
+                       brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
+                       /* WAR: It is unexpected to get a change of VIF for P2P
+                        * IF, but it happens. The request can not be handled
+                        * but returning EPERM causes a crash. Returning 0
+                        * without setting ieee80211_ptr->iftype causes trace
+                        * (WARN_ON) but it works with wpa_supplicant
+                        */
+                       return 0;
+               }
                vif->mode = WL_MODE_BSS;
                infra = 1;
                break;
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
                vif->mode = WL_MODE_AP;
                ap = 1;
                break;
@@ -475,8 +697,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
        }
 
        if (ap) {
-               set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
-               brcmf_dbg(INFO, "IF Type = AP\n");
+               if (type == NL80211_IFTYPE_P2P_GO) {
+                       brcmf_dbg(INFO, "IF Type = P2P GO\n");
+                       err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
+               }
+               if (!err) {
+                       set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
+                       brcmf_dbg(INFO, "IF Type = AP\n");
+               }
        } else {
                err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
                if (err) {
@@ -495,21 +723,6 @@ done:
        return err;
 }
 
-static void brcmf_set_mpc(struct net_device *ndev, int mpc)
-{
-       struct brcmf_if *ifp = netdev_priv(ndev);
-       s32 err = 0;
-
-       if (check_vif_up(ifp->vif)) {
-               err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
-               if (err) {
-                       brcmf_err("fail to set mpc\n");
-                       return;
-               }
-               brcmf_dbg(INFO, "MPC : %d\n", mpc);
-       }
-}
-
 static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
                             struct cfg80211_scan_request *request)
 {
@@ -590,69 +803,6 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
 }
 
 static s32
-brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
-                           struct net_device *ndev,
-                           bool aborted, bool fw_abort)
-{
-       struct brcmf_scan_params_le params_le;
-       struct cfg80211_scan_request *scan_request;
-       s32 err = 0;
-
-       brcmf_dbg(SCAN, "Enter\n");
-
-       /* clear scan request, because the FW abort can cause a second call */
-       /* to this functon and might cause a double cfg80211_scan_done      */
-       scan_request = cfg->scan_request;
-       cfg->scan_request = NULL;
-
-       if (timer_pending(&cfg->escan_timeout))
-               del_timer_sync(&cfg->escan_timeout);
-
-       if (fw_abort) {
-               /* Do a scan abort to stop the driver's scan engine */
-               brcmf_dbg(SCAN, "ABORT scan in firmware\n");
-               memset(&params_le, 0, sizeof(params_le));
-               memset(params_le.bssid, 0xFF, ETH_ALEN);
-               params_le.bss_type = DOT11_BSSTYPE_ANY;
-               params_le.scan_type = 0;
-               params_le.channel_num = cpu_to_le32(1);
-               params_le.nprobes = cpu_to_le32(1);
-               params_le.active_time = cpu_to_le32(-1);
-               params_le.passive_time = cpu_to_le32(-1);
-               params_le.home_time = cpu_to_le32(-1);
-               /* Scan is aborted by setting channel_list[0] to -1 */
-               params_le.channel_list[0] = cpu_to_le16(-1);
-               /* E-Scan (or anyother type) can be aborted by SCAN */
-               err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
-                                            &params_le, sizeof(params_le));
-               if (err)
-                       brcmf_err("Scan abort  failed\n");
-       }
-       /*
-        * e-scan can be initiated by scheduled scan
-        * which takes precedence.
-        */
-       if (cfg->sched_escan) {
-               brcmf_dbg(SCAN, "scheduled scan completed\n");
-               cfg->sched_escan = false;
-               if (!aborted)
-                       cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
-               brcmf_set_mpc(ndev, 1);
-       } else if (scan_request) {
-               brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
-                         aborted ? "Aborted" : "Done");
-               cfg80211_scan_done(scan_request, aborted);
-               brcmf_set_mpc(ndev, 1);
-       }
-       if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
-               brcmf_err("Scan complete while device not scanning\n");
-               return -EPERM;
-       }
-
-       return err;
-}
-
-static s32
 brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
                struct cfg80211_scan_request *request, u16 action)
 {
@@ -703,11 +853,12 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
        s32 err;
        u32 passive_scan;
        struct brcmf_scan_results *results;
+       struct escan_info *escan = &cfg->escan_info;
 
        brcmf_dbg(SCAN, "Enter\n");
-       cfg->escan_info.ndev = ndev;
-       cfg->escan_info.wiphy = wiphy;
-       cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+       escan->ndev = ndev;
+       escan->wiphy = wiphy;
+       escan->escan_state = WL_ESCAN_STATE_SCANNING;
        passive_scan = cfg->active_scan ? 0 : 1;
        err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
                                    passive_scan);
@@ -721,7 +872,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
        results->count = 0;
        results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
 
-       err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+       err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START);
        if (err)
                brcmf_set_mpc(ndev, 1);
        return err;
@@ -758,6 +909,12 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
                return -EAGAIN;
        }
 
+       /* If scan req comes for p2p0, send it over primary I/F */
+       if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) {
+               ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+               ndev = ifp->ndev;
+       }
+
        /* Arm scan timeout timer */
        mod_timer(&cfg->escan_timeout, jiffies +
                        WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
@@ -776,6 +933,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
        cfg->scan_request = request;
        set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
        if (escan_req) {
+               cfg->escan_info.run = brcmf_run_escan;
+               err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif);
+               if (err)
+                       goto scan_out;
+
                err = brcmf_do_escan(cfg, wiphy, ndev, request);
                if (err)
                        goto scan_out;
@@ -933,31 +1095,6 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
        memset(prof, 0, sizeof(*prof));
 }
 
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
-       size_t *join_params_size)
-{
-       u16 chanspec = 0;
-
-       if (ch != 0) {
-               if (ch <= CH_MAX_2G_CHANNEL)
-                       chanspec |= WL_CHANSPEC_BAND_2G;
-               else
-                       chanspec |= WL_CHANSPEC_BAND_5G;
-
-               chanspec |= WL_CHANSPEC_BW_20;
-               chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
-               *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
-                                    sizeof(u16);
-
-               chanspec |= (ch & WL_CHANSPEC_CHAN_MASK);
-               join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
-               join_params->params_le.chanspec_num = cpu_to_le32(1);
-
-               brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec);
-       }
-}
-
 static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
 {
        s32 err = 0;
@@ -988,6 +1125,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
        s32 err = 0;
        s32 wsec = 0;
        s32 bcnprd;
+       u16 chanspec;
 
        brcmf_dbg(TRACE, "Enter\n");
        if (!check_vif_up(ifp->vif))
@@ -1091,8 +1229,11 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                                params->chandef.chan->center_freq);
                if (params->channel_fixed) {
                        /* adding chanspec */
-                       brcmf_ch_to_chanspec(cfg->channel,
-                               &join_params, &join_params_size);
+                       chanspec = channel_to_chanspec(params->chandef.chan);
+                       join_params.params_le.chanspec_list[0] =
+                               cpu_to_le16(chanspec);
+                       join_params.params_le.chanspec_num = cpu_to_le32(1);
+                       join_params_size += sizeof(join_params.params_le);
                }
 
                /* set channel for starter */
@@ -1155,7 +1296,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
        else
                val = WPA_AUTH_DISABLED;
        brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
-       err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
+       err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
        if (err) {
                brcmf_err("set wpa_auth failed (%d)\n", err);
                return err;
@@ -1194,7 +1335,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
                break;
        }
 
-       err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
+       err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
        if (err) {
                brcmf_err("set auth failed (%d)\n", err);
                return err;
@@ -1258,7 +1399,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
        }
 
        brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
-       err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
+       /* In case of privacy, but no security and WPS then simulate */
+       /* setting AES. WPS-2.0 allows no security                   */
+       if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+           sme->privacy)
+               pval = AES_ENABLED;
+       err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
        if (err) {
                brcmf_err("error (%d)\n", err);
                return err;
@@ -1280,8 +1426,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
        s32 err = 0;
 
        if (sme->crypto.n_akm_suites) {
-               err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
-                                             "wpa_auth", &val);
+               err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+                                              "wpa_auth", &val);
                if (err) {
                        brcmf_err("could not get wpa_auth (%d)\n", err);
                        return err;
@@ -1315,8 +1461,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
                }
 
                brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
-               err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
-                                             "wpa_auth", val);
+               err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev),
+                                              "wpa_auth", val);
                if (err) {
                        brcmf_err("could not set wpa_auth (%d)\n", err);
                        return err;
@@ -1393,9 +1539,28 @@ brcmf_set_sharedkey(struct net_device *ndev,
        return err;
 }
 
+static
+enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
+                                          enum nl80211_auth_type type)
+{
+       u32 ci;
+       if (type == NL80211_AUTHTYPE_AUTOMATIC) {
+               /* shift to ignore chip revision */
+               ci = brcmf_get_chip_info(ifp) >> 4;
+               switch (ci) {
+               case 43236:
+                       brcmf_dbg(CONN, "43236 WAR: use OPEN instead of AUTO\n");
+                       return NL80211_AUTHTYPE_OPEN_SYSTEM;
+               default:
+                       break;
+               }
+       }
+       return type;
+}
+
 static s32
 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
-                   struct cfg80211_connect_params *sme)
+                      struct cfg80211_connect_params *sme)
 {
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_if *ifp = netdev_priv(ndev);
@@ -1403,7 +1568,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        struct ieee80211_channel *chan = sme->channel;
        struct brcmf_join_params join_params;
        size_t join_params_size;
-       struct brcmf_ssid ssid;
+       struct brcmf_tlv *rsn_ie;
+       struct brcmf_vs_tlv *wpa_ie;
+       void *ie;
+       u32 ie_len;
+       struct brcmf_ext_join_params_le *ext_join_params;
+       u16 chanspec;
 
        s32 err = 0;
 
@@ -1416,15 +1586,46 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                return -EOPNOTSUPP;
        }
 
+       if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
+               /* A normal (non P2P) connection request setup. */
+               ie = NULL;
+               ie_len = 0;
+               /* find the WPA_IE */
+               wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len);
+               if (wpa_ie) {
+                       ie = wpa_ie;
+                       ie_len = wpa_ie->len + TLV_HDR_LEN;
+               } else {
+                       /* find the RSN_IE */
+                       rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+                                                 WLAN_EID_RSN);
+                       if (rsn_ie) {
+                               ie = rsn_ie;
+                               ie_len = rsn_ie->len + TLV_HDR_LEN;
+                       }
+               }
+               brcmf_fil_iovar_data_set(ifp, "wpaie", ie, ie_len);
+       }
+
+       err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+                                   sme->ie, sme->ie_len);
+       if (err)
+               brcmf_err("Set Assoc REQ IE Failed\n");
+       else
+               brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
        set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
 
        if (chan) {
                cfg->channel =
                        ieee80211_frequency_to_channel(chan->center_freq);
-               brcmf_dbg(CONN, "channel (%d), center_req (%d)\n",
-                         cfg->channel, chan->center_freq);
-       } else
+               chanspec = channel_to_chanspec(chan);
+               brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
+                         cfg->channel, chan->center_freq, chanspec);
+       } else {
                cfg->channel = 0;
+               chanspec = 0;
+       }
 
        brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
 
@@ -1434,6 +1635,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
+       sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
        err = brcmf_set_auth_type(ndev, sme);
        if (err) {
                brcmf_err("wl_set_auth_type failed (%d)\n", err);
@@ -1458,27 +1660,88 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
+       profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID),
+                                      (u32)sme->ssid_len);
+       memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+       if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+               profile->ssid.SSID[profile->ssid.SSID_len] = 0;
+               brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID,
+                         profile->ssid.SSID_len);
+       }
+
+       /* Join with specific BSSID and cached SSID
+        * If SSID is zero join based on BSSID only
+        */
+       join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) +
+               offsetof(struct brcmf_assoc_params_le, chanspec_list);
+       if (cfg->channel)
+               join_params_size += sizeof(u16);
+       ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+       if (ext_join_params == NULL) {
+               err = -ENOMEM;
+               goto done;
+       }
+       ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+       memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+              profile->ssid.SSID_len);
+       /*increase dwell time to receive probe response or detect Beacon
+        * from target AP at a noisy air only during connect command
+        */
+       ext_join_params->scan_le.active_time =
+               cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+       ext_join_params->scan_le.passive_time =
+               cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+       /* Set up join scan parameters */
+       ext_join_params->scan_le.scan_type = -1;
+       /* to sync with presence period of VSDB GO.
+        * Send probe request more frequently. Probe request will be stopped
+        * when it gets probe response from target AP/GO.
+        */
+       ext_join_params->scan_le.nprobes =
+               cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+                           BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+       ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+       if (sme->bssid)
+               memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
+       else
+               memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+
+       if (cfg->channel) {
+               ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
+
+               ext_join_params->assoc_le.chanspec_list[0] =
+                       cpu_to_le16(chanspec);
+       }
+
+       err  = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+                                        join_params_size);
+       kfree(ext_join_params);
+       if (!err)
+               /* This is it. join command worked, we are done */
+               goto done;
+
+       /* join command failed, fallback to set ssid */
        memset(&join_params, 0, sizeof(join_params));
        join_params_size = sizeof(join_params.ssid_le);
 
-       profile->ssid.SSID_len = min_t(u32,
-                                      sizeof(ssid.SSID), (u32)sme->ssid_len);
        memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
-       memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
        join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
 
-       memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
-
-       if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
-               brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n",
-                         ssid.SSID, ssid.SSID_len);
+       if (sme->bssid)
+               memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
+       else
+               memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
 
-       brcmf_ch_to_chanspec(cfg->channel,
-                            &join_params, &join_params_size);
+       if (cfg->channel) {
+               join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
+               join_params.params_le.chanspec_num = cpu_to_le32(1);
+               join_params_size += sizeof(join_params.params_le);
+       }
        err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
                                     &join_params, join_params_size);
        if (err)
-               brcmf_err("WLC_SET_SSID failed (%d)\n", err);
+               brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err);
 
 done:
        if (err)
@@ -1937,7 +2200,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                        goto done;
                }
                /* Report the current tx rate */
-       err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
+               err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
                if (err) {
                        brcmf_err("Could not get rate (%d)\n", err);
                        goto done;
@@ -2060,7 +2323,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
        if (!bss)
                return -ENOMEM;
 
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(wiphy, bss);
 
        return err;
 }
@@ -2166,7 +2429,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
                goto CleanUp;
        }
 
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(wiphy, bss);
 
 CleanUp:
 
@@ -2182,78 +2445,10 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
        return vif->mode == WL_MODE_IBSS;
 }
 
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- */
-static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
-{
-       struct brcmf_tlv *elt;
-       int totlen;
-
-       elt = (struct brcmf_tlv *) buf;
-       totlen = buflen;
-
-       /* find tagged parameter */
-       while (totlen >= TLV_HDR_LEN) {
-               int len = elt->len;
-
-               /* validate remaining totlen */
-               if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
-                       return elt;
-
-               elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
-               totlen -= (len + TLV_HDR_LEN);
-       }
-
-       return NULL;
-}
-
-/* Is any of the tlvs the expected entry? If
- * not update the tlvs buffer pointer/length.
- */
-static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
-                u8 *oui, u32 oui_len, u8 type)
-{
-       /* If the contents match the OUI and the type */
-       if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
-           !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
-           type == ie[TLV_BODY_OFF + oui_len]) {
-               return true;
-       }
-
-       if (tlvs == NULL)
-               return false;
-       /* point to the next ie */
-       ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
-       /* calculate the length of the rest of the buffer */
-       *tlvs_len -= (int)(ie - *tlvs);
-       /* update the pointer to the start of the buffer */
-       *tlvs = ie;
-
-       return false;
-}
-
-static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+                                struct brcmf_if *ifp)
 {
-       struct brcmf_tlv *ie;
-
-       while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
-               if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
-                                    WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
-                       return (struct brcmf_vs_tlv *)ie;
-       }
-       return NULL;
-}
-
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
-{
-       struct net_device *ndev = cfg_to_ndev(cfg);
-       struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
-       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
        struct brcmf_bss_info_le *bi;
        struct brcmf_ssid *ssid;
        struct brcmf_tlv *tim;
@@ -2309,7 +2504,7 @@ update_bss_info_out:
        return err;
 }
 
-static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
 {
        struct escan_info *escan = &cfg->escan_info;
 
@@ -2328,8 +2523,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
                        container_of(work, struct brcmf_cfg80211_info,
                                     escan_timeout_work);
 
-       brcmf_notify_escan_complete(cfg,
-               cfg->escan_info.ndev, true, true);
+       brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
 }
 
 static void brcmf_escan_timeout(unsigned long data)
@@ -2401,16 +2595,11 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 
        if (status == BRCMF_E_STATUS_PARTIAL) {
                brcmf_dbg(SCAN, "ESCAN Partial result\n");
-               escan_result_le = (struct brcmf_escan_result_le *) data;
-               if (!escan_result_le) {
-                       brcmf_err("Invalid escan result (NULL pointer)\n");
-                       goto exit;
-               }
-               if (!cfg->scan_request) {
-                       brcmf_dbg(SCAN, "result without cfg80211 request\n");
+               escan_result_le = (struct brcmf_escan_result_le *) data;
+               if (!escan_result_le) {
+                       brcmf_err("Invalid escan result (NULL pointer)\n");
                        goto exit;
                }
-
                if (le16_to_cpu(escan_result_le->bss_count) != 1) {
                        brcmf_err("Invalid bss_count %d: ignoring\n",
                                  escan_result_le->bss_count);
@@ -2418,6 +2607,14 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
                }
                bss_info_le = &escan_result_le->bss_info_le;
 
+               if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
+                       goto exit;
+
+               if (!cfg->scan_request) {
+                       brcmf_dbg(SCAN, "result without cfg80211 request\n");
+                       goto exit;
+               }
+
                bi_length = le32_to_cpu(bss_info_le->length);
                if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
                                        WL_ESCAN_RESULTS_FIXED_SIZE)) {
@@ -2456,6 +2653,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
                list->count++;
        } else {
                cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+               if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
+                       goto exit;
                if (cfg->scan_request) {
                        cfg->bss_list = (struct brcmf_scan_results *)
                                cfg->escan_info.escan_buf;
@@ -2464,7 +2663,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
                        brcmf_notify_escan_complete(cfg, ndev, aborted,
                                                    false);
                } else
-                       brcmf_err("Unexpected scan result 0x%x\n", status);
+                       brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
+                                 status);
        }
 exit:
        return err;
@@ -2968,9 +3168,8 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
 }
 #endif
 
-static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
 {
-       struct brcmf_if *ifp = netdev_priv(ndev);
        s32 err;
 
        /* set auth */
@@ -3229,7 +3428,7 @@ brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
                          parsed_info->vndrie.oui[2],
                          parsed_info->vndrie.oui_type);
 
-               if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+               if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT)
                        break;
 next:
                remaining_len -= (ie->len + TLV_HDR_LEN);
@@ -3263,7 +3462,6 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
        return ie_len + VNDR_IE_HDR_SIZE;
 }
 
-static
 s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
                          const u8 *vndr_ie_buf, u32 vndr_ie_len)
 {
@@ -3295,24 +3493,28 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
        if (!iovar_ie_buf)
                return -ENOMEM;
        curr_ie_buf = iovar_ie_buf;
-       if (ifp->vif->mode == WL_MODE_AP) {
-               switch (pktflag) {
-               case VNDR_IE_PRBRSP_FLAG:
-                       mgmt_ie_buf = saved_ie->probe_res_ie;
-                       mgmt_ie_len = &saved_ie->probe_res_ie_len;
-                       mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
-                       break;
-               case VNDR_IE_BEACON_FLAG:
-                       mgmt_ie_buf = saved_ie->beacon_ie;
-                       mgmt_ie_len = &saved_ie->beacon_ie_len;
-                       mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
-                       break;
-               default:
-                       err = -EPERM;
-                       brcmf_err("not suitable type\n");
-                       goto exit;
-               }
-       } else {
+       switch (pktflag) {
+       case BRCMF_VNDR_IE_PRBREQ_FLAG:
+               mgmt_ie_buf = saved_ie->probe_req_ie;
+               mgmt_ie_len = &saved_ie->probe_req_ie_len;
+               mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie);
+               break;
+       case BRCMF_VNDR_IE_PRBRSP_FLAG:
+               mgmt_ie_buf = saved_ie->probe_res_ie;
+               mgmt_ie_len = &saved_ie->probe_res_ie_len;
+               mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
+               break;
+       case BRCMF_VNDR_IE_BEACON_FLAG:
+               mgmt_ie_buf = saved_ie->beacon_ie;
+               mgmt_ie_len = &saved_ie->beacon_ie_len;
+               mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
+               break;
+       case BRCMF_VNDR_IE_ASSOCREQ_FLAG:
+               mgmt_ie_buf = saved_ie->assoc_req_ie;
+               mgmt_ie_len = &saved_ie->assoc_req_ie_len;
+               mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
+               break;
+       default:
                err = -EPERM;
                brcmf_err("not suitable type\n");
                goto exit;
@@ -3421,6 +3623,49 @@ exit:
        return err;
 }
 
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
+{
+       s32 pktflags[] = {
+               BRCMF_VNDR_IE_PRBREQ_FLAG,
+               BRCMF_VNDR_IE_PRBRSP_FLAG,
+               BRCMF_VNDR_IE_BEACON_FLAG
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(pktflags); i++)
+               brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
+
+       memset(&vif->saved_ie, 0, sizeof(vif->saved_ie));
+       return 0;
+}
+
+static s32
+brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
+                       struct cfg80211_beacon_data *beacon)
+{
+       s32 err;
+
+       /* Set Beacon IEs to FW */
+       err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG,
+                                   beacon->tail, beacon->tail_len);
+       if (err) {
+               brcmf_err("Set Beacon IE Failed\n");
+               return err;
+       }
+       brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
+
+       /* Set Probe Response IEs to FW */
+       err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBRSP_FLAG,
+                                   beacon->proberesp_ies,
+                                   beacon->proberesp_ies_len);
+       if (err)
+               brcmf_err("Set Probe Resp IE Failed\n");
+       else
+               brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+
+       return err;
+}
+
 static s32
 brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                        struct cfg80211_ap_settings *settings)
@@ -3433,7 +3678,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
        struct brcmf_tlv *rsn_ie;
        struct brcmf_vs_tlv *wpa_ie;
        struct brcmf_join_params join_params;
-       s32 bssidx = 0;
+       enum nl80211_iftype dev_role;
+       struct brcmf_fil_bss_enable_le bss_enable;
 
        brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
                  cfg80211_get_chandef_type(&settings->chandef),
@@ -3443,10 +3689,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                  settings->ssid, settings->ssid_len, settings->auth_type,
                  settings->inactivity_timeout);
 
-       if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
-               brcmf_err("Not in AP creation mode\n");
-               return -EPERM;
-       }
+       dev_role = ifp->vif->wdev.iftype;
 
        memset(&ssid_le, 0, sizeof(ssid_le));
        if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -3467,21 +3710,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
        }
 
        brcmf_set_mpc(ndev, 0);
-       err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
-       if (err < 0) {
-               brcmf_err("BRCMF_C_DOWN error %d\n", err);
-               goto exit;
-       }
-       err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
-       if (err < 0) {
-               brcmf_err("SET INFRA error %d\n", err);
-               goto exit;
-       }
-       err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
-       if (err < 0) {
-               brcmf_err("setting AP mode failed %d\n", err);
-               goto exit;
-       }
 
        /* find the RSN_IE */
        rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3507,27 +3735,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                }
        } else {
                brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
-               brcmf_configure_opensecurity(ndev, bssidx);
+               brcmf_configure_opensecurity(ifp);
        }
-       /* Set Beacon IEs to FW */
-       err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
-                                   VNDR_IE_BEACON_FLAG,
-                                   settings->beacon.tail,
-                                   settings->beacon.tail_len);
-       if (err)
-               brcmf_err("Set Beacon IE Failed\n");
-       else
-               brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
 
-       /* Set Probe Response IEs to FW */
-       err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
-                                   VNDR_IE_PRBRSP_FLAG,
-                                   settings->beacon.proberesp_ies,
-                                   settings->beacon.proberesp_ies_len);
-       if (err)
-               brcmf_err("Set Probe Resp IE Failed\n");
-       else
-               brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+       brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
 
        if (settings->beacon_interval) {
                err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
@@ -3545,22 +3756,62 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                        goto exit;
                }
        }
-       err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
-       if (err < 0) {
-               brcmf_err("BRCMF_C_UP error (%d)\n", err);
-               goto exit;
+
+       if (dev_role == NL80211_IFTYPE_AP) {
+               err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+               if (err < 0) {
+                       brcmf_err("BRCMF_C_DOWN error %d\n", err);
+                       goto exit;
+               }
+               brcmf_fil_iovar_int_set(ifp, "apsta", 0);
        }
 
-       memset(&join_params, 0, sizeof(join_params));
-       /* join parameters starts with ssid */
-       memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
-       /* create softap */
-       err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
-                                    &join_params, sizeof(join_params));
+       err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
        if (err < 0) {
-               brcmf_err("SET SSID error (%d)\n", err);
+               brcmf_err("SET INFRA error %d\n", err);
                goto exit;
        }
+       if (dev_role == NL80211_IFTYPE_AP) {
+               err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
+               if (err < 0) {
+                       brcmf_err("setting AP mode failed %d\n", err);
+                       goto exit;
+               }
+               err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+               if (err < 0) {
+                       brcmf_err("BRCMF_C_UP error (%d)\n", err);
+                       goto exit;
+               }
+
+               memset(&join_params, 0, sizeof(join_params));
+               /* join parameters start with ssid */
+               memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+               /* create softap */
+               err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+                                            &join_params, sizeof(join_params));
+               if (err < 0) {
+                       brcmf_err("SET SSID error (%d)\n", err);
+                       goto exit;
+               }
+               brcmf_dbg(TRACE, "AP mode configuration complete\n");
+       } else {
+               err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
+                                               sizeof(ssid_le));
+               if (err < 0) {
+                       brcmf_err("setting ssid failed %d\n", err);
+                       goto exit;
+               }
+               bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+               bss_enable.enable = cpu_to_le32(1);
+               err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+                                              sizeof(bss_enable));
+               if (err < 0) {
+                       brcmf_err("bss_enable config failed %d\n", err);
+                       goto exit;
+               }
+
+               brcmf_dbg(TRACE, "GO mode configuration complete\n");
+       }
        clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
        set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
 
@@ -3574,10 +3825,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
        s32 err = -EPERM;
+       struct brcmf_fil_bss_enable_le bss_enable;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       if (ifp->vif->mode == WL_MODE_AP) {
+       if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) {
                /* Due to most likely deauths outstanding we sleep */
                /* first to make sure they get processed by fw. */
                msleep(400);
@@ -3591,18 +3843,41 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
                        brcmf_err("BRCMF_C_UP error %d\n", err);
                        goto exit;
                }
-               brcmf_set_mpc(ndev, 1);
-               clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
-               clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+       } else {
+               bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+               bss_enable.enable = cpu_to_le32(0);
+               err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+                                              sizeof(bss_enable));
+               if (err < 0)
+                       brcmf_err("bss_enable config failed %d\n", err);
        }
+       brcmf_set_mpc(ndev, 1);
+       set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+       clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+
 exit:
        return err;
 }
 
+static s32
+brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
+                            struct cfg80211_beacon_data *info)
+{
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       s32 err;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
+
+       return err;
+}
+
 static int
 brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
                           u8 *mac)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_scb_val_le scbval;
        struct brcmf_if *ifp = netdev_priv(ndev);
        s32 err;
@@ -3612,6 +3887,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
 
        brcmf_dbg(TRACE, "Enter %pM\n", mac);
 
+       if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+               ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
        if (!check_vif_up(ifp->vif))
                return -EIO;
 
@@ -3626,7 +3903,147 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
        return err;
 }
 
+
+static void
+brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev,
+                                  u16 frame_type, bool reg)
+{
+       struct brcmf_if *ifp = netdev_priv(wdev->netdev);
+       struct brcmf_cfg80211_vif *vif = ifp->vif;
+       u16 mgmt_type;
+
+       brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
+
+       mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+       if (reg)
+               vif->mgmt_rx_reg |= BIT(mgmt_type);
+       else
+               vif->mgmt_rx_reg &= ~BIT(mgmt_type);
+}
+
+
+static int
+brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+                      struct ieee80211_channel *chan, bool offchan,
+                      unsigned int wait, const u8 *buf, size_t len,
+                      bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       const struct ieee80211_mgmt *mgmt;
+       struct brcmf_if *ifp;
+       struct brcmf_cfg80211_vif *vif;
+       s32 err = 0;
+       s32 ie_offset;
+       s32 ie_len;
+       struct brcmf_fil_action_frame_le *action_frame;
+       struct brcmf_fil_af_params_le *af_params;
+       bool ack;
+       s32 chan_nr;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       *cookie = 0;
+
+       mgmt = (const struct ieee80211_mgmt *)buf;
+
+       if (!ieee80211_is_mgmt(mgmt->frame_control)) {
+               brcmf_err("Driver only allows MGMT packet type\n");
+               return -EPERM;
+       }
+
+       if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+               /* Right now the only reason to get a probe response */
+               /* is for p2p listen response or for p2p GO from     */
+               /* wpa_supplicant. Unfortunately the probe is sent   */
+               /* on primary ndev, while dongle wants it on the p2p */
+               /* vif. Since this is only reason for a probe        */
+               /* response to be sent, the vif is taken from cfg.   */
+               /* If ever desired to send proberesp for non p2p     */
+               /* response then data should be checked for          */
+               /* "DIRECT-". Note in future supplicant will take    */
+               /* dedicated p2p wdev to do this and then this 'hack'*/
+               /* is not needed anymore.                            */
+               ie_offset =  DOT11_MGMT_HDR_LEN +
+                            DOT11_BCN_PRB_FIXED_LEN;
+               ie_len = len - ie_offset;
+               ifp = netdev_priv(wdev->netdev);
+               vif = ifp->vif;
+               if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
+                       vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+               err = brcmf_vif_set_mgmt_ie(vif,
+                                           BRCMF_VNDR_IE_PRBRSP_FLAG,
+                                           &buf[ie_offset],
+                                           ie_len);
+               cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+                                       GFP_KERNEL);
+       } else if (ieee80211_is_action(mgmt->frame_control)) {
+               af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+               if (af_params == NULL) {
+                       brcmf_err("unable to allocate frame\n");
+                       err = -ENOMEM;
+                       goto exit;
+               }
+               action_frame = &af_params->action_frame;
+               /* Add the packet Id */
+               action_frame->packet_id = cpu_to_le32(*cookie);
+               /* Add BSSID */
+               memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN);
+               memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+               /* Add the length, excluding the 802.11 header */
+               action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+               /* Add the channel */
+               chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
+               af_params->channel = cpu_to_le32(chan_nr);
+
+               memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
+                      le16_to_cpu(action_frame->len));
+
+               brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
+                         *cookie, le16_to_cpu(action_frame->len),
+                         chan->center_freq);
+
+               ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev,
+                                                 af_params);
+
+               cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+                                       GFP_KERNEL);
+               kfree(af_params);
+       } else {
+               brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control);
+               brcmf_dbg_hex_dump(true, buf, len, "payload, len=%Zu\n", len);
+       }
+
+exit:
+       return err;
+}
+
+
+static int
+brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+                                       struct wireless_dev *wdev,
+                                       u64 cookie)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_vif *vif;
+       int err = 0;
+
+       brcmf_dbg(TRACE, "Enter p2p listen cancel\n");
+
+       vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+       if (vif == NULL) {
+               brcmf_err("No p2p device available for probe response\n");
+               err = -ENODEV;
+               goto exit;
+       }
+       brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+exit:
+       return err;
+}
+
 static struct cfg80211_ops wl_cfg80211_ops = {
+       .add_virtual_intf = brcmf_cfg80211_add_iface,
+       .del_virtual_intf = brcmf_cfg80211_del_iface,
        .change_virtual_intf = brcmf_cfg80211_change_iface,
        .scan = brcmf_cfg80211_scan,
        .set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
@@ -3650,28 +4067,43 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        .flush_pmksa = brcmf_cfg80211_flush_pmksa,
        .start_ap = brcmf_cfg80211_start_ap,
        .stop_ap = brcmf_cfg80211_stop_ap,
+       .change_beacon = brcmf_cfg80211_change_beacon,
        .del_station = brcmf_cfg80211_del_station,
        .sched_scan_start = brcmf_cfg80211_sched_scan_start,
        .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+       .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
+       .mgmt_tx = brcmf_cfg80211_mgmt_tx,
+       .remain_on_channel = brcmf_p2p_remain_on_channel,
+       .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
 #ifdef CONFIG_NL80211_TESTMODE
        .testmode_cmd = brcmf_cfg80211_testmode
 #endif
 };
 
-static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
+static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
 {
-       s32 err = 0;
-
-       switch (mode) {
-       case WL_MODE_BSS:
-               return NL80211_IFTYPE_STATION;
-       case WL_MODE_IBSS:
-               return NL80211_IFTYPE_ADHOC;
+       switch (type) {
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_MESH_POINT:
+               return -ENOTSUPP;
+       case NL80211_IFTYPE_ADHOC:
+               return WL_MODE_IBSS;
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               return WL_MODE_BSS;
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               return WL_MODE_AP;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               return WL_MODE_P2P;
+       case NL80211_IFTYPE_UNSPECIFIED:
        default:
-               return NL80211_IFTYPE_UNSPECIFIED;
+               break;
        }
 
-       return err;
+       return -EINVAL;
 }
 
 static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
@@ -3683,6 +4115,56 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
        wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 }
 
+static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_STATION) |
+                        BIT(NL80211_IFTYPE_ADHOC) |
+                        BIT(NL80211_IFTYPE_AP)
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                        BIT(NL80211_IFTYPE_P2P_GO)
+       },
+};
+static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
+       {
+                .max_interfaces = BRCMF_IFACE_MAX_CNT,
+                .num_different_channels = 1, /* no multi-channel for now */
+                .n_limits = ARRAY_SIZE(brcmf_iface_limits),
+                .limits = brcmf_iface_limits
+       }
+};
+
+static const struct ieee80211_txrx_stypes
+brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
+       [NL80211_IFTYPE_STATION] = {
+               .tx = 0xffff,
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_P2P_CLIENT] = {
+               .tx = 0xffff,
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_P2P_GO] = {
+               .tx = 0xffff,
+               .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+                     BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+                     BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+                     BIT(IEEE80211_STYPE_AUTH >> 4) |
+                     BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+                     BIT(IEEE80211_STYPE_ACTION >> 4)
+       }
+};
+
 static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
 {
        struct wiphy *wiphy;
@@ -3695,10 +4177,16 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
        }
        set_wiphy_dev(wiphy, phydev);
        wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+       wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
        wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_ADHOC) |
-                                BIT(NL80211_IFTYPE_AP);
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                BIT(NL80211_IFTYPE_P2P_GO) |
+                                BIT(NL80211_IFTYPE_P2P_DEVICE);
+       wiphy->iface_combinations = brcmf_iface_combos;
+       wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
        wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
        wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;  /* Set
                                                * it as 11a by default.
@@ -3710,10 +4198,11 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        wiphy->cipher_suites = __wl_cipher_suites;
        wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
-       wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;    /* enable power
-                                                                * save mode
-                                                                * by default
-                                                                */
+       wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+                       WIPHY_FLAG_OFFCHAN_TX |
+                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+       wiphy->mgmt_stypes = brcmf_txrx_stypes;
+       wiphy->max_remain_on_channel_duration = 5000;
        brcmf_wiphy_pno_params(wiphy);
        err = wiphy_register(wiphy);
        if (err < 0) {
@@ -3724,31 +4213,25 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
        return wiphy;
 }
 
-static
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
-                                          struct net_device *netdev,
-                                          s32 mode, bool pm_block)
+                                          enum nl80211_iftype type,
+                                          bool pm_block)
 {
        struct brcmf_cfg80211_vif *vif;
 
        if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
                return ERR_PTR(-ENOSPC);
 
+       brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
+                 sizeof(*vif));
        vif = kzalloc(sizeof(*vif), GFP_KERNEL);
        if (!vif)
                return ERR_PTR(-ENOMEM);
 
        vif->wdev.wiphy = cfg->wiphy;
-       vif->wdev.netdev = netdev;
-       vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
-
-       if (netdev) {
-               vif->ifp = netdev_priv(netdev);
-               netdev->ieee80211_ptr = &vif->wdev;
-               SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
-       }
+       vif->wdev.iftype = type;
 
-       vif->mode = mode;
+       vif->mode = brcmf_nl80211_iftype_to_mode(type);
        vif->pm_block = pm_block;
        vif->roam_off = -1;
 
@@ -3759,7 +4242,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
        return vif;
 }
 
-static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
 {
        struct brcmf_cfg80211_info *cfg;
        struct wiphy *wiphy;
@@ -3833,9 +4316,9 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
        conn_info->resp_ie_len = 0;
 }
 
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+                              struct brcmf_if *ifp)
 {
-       struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
        struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
        struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
        u32 req_len;
@@ -3911,9 +4394,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       brcmf_get_assoc_ies(cfg);
+       brcmf_get_assoc_ies(cfg, ifp);
        memcpy(profile->bssid, e->addr, ETH_ALEN);
-       brcmf_update_bss_info(cfg);
+       brcmf_update_bss_info(cfg, ifp);
 
        buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
        if (buf == NULL) {
@@ -3968,9 +4451,11 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
        if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
                               &ifp->vif->sme_state)) {
                if (completed) {
-                       brcmf_get_assoc_ies(cfg);
+                       brcmf_get_assoc_ies(cfg, ifp);
                        memcpy(profile->bssid, e->addr, ETH_ALEN);
-                       brcmf_update_bss_info(cfg);
+                       brcmf_update_bss_info(cfg, ifp);
+                       set_bit(BRCMF_VIF_STATUS_CONNECTED,
+                               &ifp->vif->sme_state);
                }
                cfg80211_connect_result(ndev,
                                        (u8 *)profile->bssid,
@@ -3981,9 +4466,6 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
                                        completed ? WLAN_STATUS_SUCCESS :
                                                    WLAN_STATUS_AUTH_TIMEOUT,
                                        GFP_KERNEL);
-               if (completed)
-                       set_bit(BRCMF_VIF_STATUS_CONNECTED,
-                               &ifp->vif->sme_state);
                brcmf_dbg(CONN, "Report connect result - connection %s\n",
                          completed ? "succeeded" : "failed");
        }
@@ -3996,38 +4478,38 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
                               struct net_device *ndev,
                               const struct brcmf_event_msg *e, void *data)
 {
-       s32 err = 0;
+       static int generation;
        u32 event = e->event_code;
        u32 reason = e->reason;
-       u32 len = e->datalen;
-       static int generation;
-
        struct station_info sinfo;
 
        brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
-       memset(&sinfo, 0, sizeof(sinfo));
+       if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
+           ndev != cfg_to_ndev(cfg)) {
+               brcmf_dbg(CONN, "AP mode link down\n");
+               complete(&cfg->vif_disabled);
+               return 0;
+       }
 
-       sinfo.filled = 0;
        if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
-           reason == BRCMF_E_STATUS_SUCCESS) {
+           (reason == BRCMF_E_STATUS_SUCCESS)) {
+               memset(&sinfo, 0, sizeof(sinfo));
                sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
                if (!data) {
                        brcmf_err("No IEs present in ASSOC/REASSOC_IND");
                        return -EINVAL;
                }
                sinfo.assoc_req_ies = data;
-               sinfo.assoc_req_ies_len = len;
+               sinfo.assoc_req_ies_len = e->datalen;
                generation++;
                sinfo.generation = generation;
-               cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+               cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL);
        } else if ((event == BRCMF_E_DISASSOC_IND) ||
                   (event == BRCMF_E_DEAUTH_IND) ||
                   (event == BRCMF_E_DEAUTH)) {
-               generation++;
-               sinfo.generation = generation;
-               cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+               cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
        }
-       return err;
+       return 0;
 }
 
 static s32
@@ -4064,6 +4546,8 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
                }
                brcmf_link_down(ifp->vif);
                brcmf_init_prof(ndev_to_prof(ndev));
+               if (ndev != cfg_to_ndev(cfg))
+                       complete(&cfg->vif_disabled);
        } else if (brcmf_is_nonetwork(cfg, e)) {
                if (brcmf_is_ibssmode(ifp->vif))
                        clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -4112,6 +4596,57 @@ brcmf_notify_mic_status(struct brcmf_if *ifp,
        return 0;
 }
 
+static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
+                                 const struct brcmf_event_msg *e, void *data)
+{
+       struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+       struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data;
+       struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+       struct brcmf_cfg80211_vif *vif;
+
+       brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n",
+                 ifevent->action, ifevent->flags, ifevent->ifidx,
+                 ifevent->bssidx);
+
+       mutex_lock(&event->vif_event_lock);
+       event->action = ifevent->action;
+       vif = event->vif;
+
+       switch (ifevent->action) {
+       case BRCMF_E_IF_ADD:
+               /* waiting process may have timed out */
+               if (!cfg->vif_event.vif)
+                       return -EBADF;
+
+               ifp->vif = vif;
+               vif->ifp = ifp;
+               vif->wdev.netdev = ifp->ndev;
+               ifp->ndev->ieee80211_ptr = &vif->wdev;
+               SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
+               mutex_unlock(&event->vif_event_lock);
+               wake_up(&event->vif_wq);
+               return 0;
+
+       case BRCMF_E_IF_DEL:
+               ifp->vif = NULL;
+               mutex_unlock(&event->vif_event_lock);
+               /* event may not be upon user request */
+               if (brcmf_cfg80211_vif_event_armed(cfg))
+                       wake_up(&event->vif_wq);
+               return 0;
+
+       case BRCMF_E_IF_CHANGE:
+               mutex_unlock(&event->vif_event_lock);
+               wake_up(&event->vif_wq);
+               return 0;
+
+       default:
+               mutex_unlock(&event->vif_event_lock);
+               break;
+       }
+       return -EINVAL;
+}
+
 static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
 {
        conf->frag_threshold = (u32)-1;
@@ -4143,6 +4678,18 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
                            brcmf_notify_connect_status);
        brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
                            brcmf_notify_sched_scan_results);
+       brcmf_fweh_register(cfg->pub, BRCMF_E_IF,
+                           brcmf_notify_vif_event);
+       brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_PROBEREQ_MSG,
+                           brcmf_p2p_notify_rx_mgmt_p2p_probereq);
+       brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_DISC_LISTEN_COMPLETE,
+                           brcmf_p2p_notify_listen_complete);
+       brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_RX,
+                           brcmf_p2p_notify_action_frame_rx);
+       brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_COMPLETE,
+                           brcmf_p2p_notify_action_tx_complete);
+       brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE,
+                           brcmf_p2p_notify_action_tx_complete);
 }
 
 static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -4198,7 +4745,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
        mutex_init(&cfg->usr_sync);
        brcmf_init_escan(cfg);
        brcmf_init_conf(cfg->conf);
-
+       init_completion(&cfg->vif_disabled);
        return err;
 }
 
@@ -4209,6 +4756,12 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
        brcmf_deinit_priv_mem(cfg);
 }
 
+static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
+{
+       init_waitqueue_head(&event->vif_wq);
+       mutex_init(&event->vif_event_lock);
+}
+
 struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                                                  struct device *busdev)
 {
@@ -4232,25 +4785,41 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
        cfg = wiphy_priv(wiphy);
        cfg->wiphy = wiphy;
        cfg->pub = drvr;
+       init_vif_event(&cfg->vif_event);
        INIT_LIST_HEAD(&cfg->vif_list);
 
-       vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+       vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
        if (IS_ERR(vif)) {
                wiphy_free(wiphy);
                return NULL;
        }
 
+       vif->ifp = ifp;
+       vif->wdev.netdev = ndev;
+       ndev->ieee80211_ptr = &vif->wdev;
+       SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
        err = wl_init_priv(cfg);
        if (err) {
                brcmf_err("Failed to init iwm_priv (%d)\n", err);
                goto cfg80211_attach_out;
        }
-
        ifp->vif = vif;
+
+       err = brcmf_p2p_attach(cfg);
+       if (err) {
+               brcmf_err("P2P initilisation failed (%d)\n", err);
+               goto cfg80211_p2p_attach_out;
+       }
+
        return cfg;
 
+cfg80211_p2p_attach_out:
+       wl_deinit_priv(cfg);
+
 cfg80211_attach_out:
        brcmf_free_vif(vif);
+       wiphy_free(wiphy);
        return NULL;
 }
 
@@ -4489,3 +5058,57 @@ s32 brcmf_cfg80211_down(struct net_device *ndev)
        return err;
 }
 
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state)
+{
+       struct brcmf_cfg80211_vif *vif;
+       u32 result = 0;
+
+       list_for_each_entry(vif, &cfg->vif_list, list) {
+               if (test_bit(state, &vif->sme_state))
+                       result++;
+       }
+       return result;
+}
+
+static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
+                                   u8 action)
+{
+       u8 evt_action;
+
+       mutex_lock(&event->vif_event_lock);
+       evt_action = event->action;
+       mutex_unlock(&event->vif_event_lock);
+       return evt_action == action;
+}
+
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+                                 struct brcmf_cfg80211_vif *vif)
+{
+       struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+       mutex_lock(&event->vif_event_lock);
+       event->vif = vif;
+       event->action = 0;
+       mutex_unlock(&event->vif_event_lock);
+}
+
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
+{
+       struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+       bool armed;
+
+       mutex_lock(&event->vif_event_lock);
+       armed = event->vif != NULL;
+       mutex_unlock(&event->vif_event_lock);
+
+       return armed;
+}
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+                                         u8 action, ulong timeout)
+{
+       struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+       return wait_event_timeout(event->vif_wq,
+                                 vif_event_equals(event, action), timeout);
+}
+
index e4d9cc7..8b5d498 100644 (file)
 #define WL_AUTH_SHARED_KEY             1       /* d11 shared authentication */
 #define IE_MAX_LEN                     512
 
+/* IE TLV processing */
+#define TLV_LEN_OFF                    1       /* length offset */
+#define TLV_HDR_LEN                    2       /* header length */
+#define TLV_BODY_OFF                   2       /* body offset */
+#define TLV_OUI_LEN                    3       /* oui id length */
+
+/* 802.11 Mgmt Packet flags */
+#define BRCMF_VNDR_IE_BEACON_FLAG      0x1
+#define BRCMF_VNDR_IE_PRBRSP_FLAG      0x2
+#define BRCMF_VNDR_IE_ASSOCRSP_FLAG    0x4
+#define BRCMF_VNDR_IE_AUTHRSP_FLAG     0x8
+#define BRCMF_VNDR_IE_PRBREQ_FLAG      0x10
+#define BRCMF_VNDR_IE_ASSOCREQ_FLAG    0x20
+/* vendor IE in IW advertisement protocol ID field */
+#define BRCMF_VNDR_IE_IWAPID_FLAG      0x40
+/* allow custom IE id */
+#define BRCMF_VNDR_IE_CUSTOM_FLAG      0x100
+
+/* P2P Action Frames flags (spec ordered) */
+#define BRCMF_VNDR_IE_GONREQ_FLAG     0x001000
+#define BRCMF_VNDR_IE_GONRSP_FLAG     0x002000
+#define BRCMF_VNDR_IE_GONCFM_FLAG     0x004000
+#define BRCMF_VNDR_IE_INVREQ_FLAG     0x008000
+#define BRCMF_VNDR_IE_INVRSP_FLAG     0x010000
+#define BRCMF_VNDR_IE_DISREQ_FLAG     0x020000
+#define BRCMF_VNDR_IE_DISRSP_FLAG     0x040000
+#define BRCMF_VNDR_IE_PRDREQ_FLAG     0x080000
+#define BRCMF_VNDR_IE_PRDRSP_FLAG     0x100000
+
+#define BRCMF_VNDR_IE_P2PAF_SHIFT      12
+
+
 /**
  * enum brcmf_scan_status - dongle scan status
  *
@@ -52,11 +84,19 @@ enum brcmf_scan_status {
        BRCMF_SCAN_STATUS_ABORT,
 };
 
-/* wi-fi mode */
+/**
+ * enum wl_mode - driver mode of virtual interface.
+ *
+ * @WL_MODE_BSS: connects to BSS.
+ * @WL_MODE_IBSS: operate as ad-hoc.
+ * @WL_MODE_AP: operate as access-point.
+ * @WL_MODE_P2P: provide P2P discovery.
+ */
 enum wl_mode {
        WL_MODE_BSS,
        WL_MODE_IBSS,
-       WL_MODE_AP
+       WL_MODE_AP,
+       WL_MODE_P2P
 };
 
 /* dongle configuration */
@@ -108,6 +148,7 @@ struct brcmf_cfg80211_profile {
  * @BRCMF_VIF_STATUS_READY: ready for operation.
  * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
  * @BRCMF_VIF_STATUS_CONNECTED: connected/joined succesfully.
+ * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
  * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
  * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
  */
@@ -115,6 +156,7 @@ enum brcmf_vif_status {
        BRCMF_VIF_STATUS_READY,
        BRCMF_VIF_STATUS_CONNECTING,
        BRCMF_VIF_STATUS_CONNECTED,
+       BRCMF_VIF_STATUS_DISCONNECTING,
        BRCMF_VIF_STATUS_AP_CREATING,
        BRCMF_VIF_STATUS_AP_CREATED
 };
@@ -122,16 +164,22 @@ enum brcmf_vif_status {
 /**
  * struct vif_saved_ie - holds saved IEs for a virtual interface.
  *
+ * @probe_req_ie: IE info for probe request.
  * @probe_res_ie: IE info for probe response.
  * @beacon_ie: IE info for beacon frame.
+ * @probe_req_ie_len: IE info length for probe request.
  * @probe_res_ie_len: IE info length for probe response.
  * @beacon_ie_len: IE info length for beacon frame.
  */
 struct vif_saved_ie {
+       u8  probe_req_ie[IE_MAX_LEN];
        u8  probe_res_ie[IE_MAX_LEN];
        u8  beacon_ie[IE_MAX_LEN];
+       u8  assoc_req_ie[IE_MAX_LEN];
+       u32 probe_req_ie_len;
        u32 probe_res_ie_len;
        u32 beacon_ie_len;
+       u32 assoc_req_ie_len;
 };
 
 /**
@@ -145,6 +193,7 @@ struct vif_saved_ie {
  * @sme_state: SME state using enum brcmf_vif_status bits.
  * @pm_block: power-management blocked.
  * @list: linked list.
+ * @mgmt_rx_reg: registered rx mgmt frame types.
  */
 struct brcmf_cfg80211_vif {
        struct brcmf_if *ifp;
@@ -156,6 +205,7 @@ struct brcmf_cfg80211_vif {
        bool pm_block;
        struct vif_saved_ie saved_ie;
        struct list_head list;
+       u16 mgmt_rx_reg;
 };
 
 /* association inform */
@@ -189,6 +239,9 @@ struct escan_info {
        u8 escan_buf[WL_ESCAN_BUF_SIZE];
        struct wiphy *wiphy;
        struct net_device *ndev;
+       s32 (*run)(struct brcmf_cfg80211_info *cfg,
+                  struct net_device *ndev,
+                  struct cfg80211_scan_request *request, u16 action);
 };
 
 /**
@@ -273,10 +326,27 @@ struct brcmf_pno_scanresults_le {
 };
 
 /**
+ * struct brcmf_cfg80211_vif_event - virtual interface event information.
+ *
+ * @vif_wq: waitqueue awaiting interface event from firmware.
+ * @vif_event_lock: protects other members in this structure.
+ *
+ * @action: either add, change, or delete.
+ * @vif: virtual interface object related to the event.
+ */
+struct brcmf_cfg80211_vif_event {
+       wait_queue_head_t vif_wq;
+       struct mutex vif_event_lock;
+       u8 action;
+       struct brcmf_cfg80211_vif *vif;
+};
+
+/**
  * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
  *
  * @wiphy: wiphy object for cfg80211 interface.
  * @conf: dongle configuration.
+ * @p2p: peer-to-peer specific information.
  * @scan_request: cfg80211 scan request object.
  * @usr_sync: mainly for dongle up/down synchronization.
  * @bss_list: bss_list holding scanned ap information.
@@ -304,10 +374,12 @@ struct brcmf_pno_scanresults_le {
  * @escan_ioctl_buf: dongle command buffer for escan commands.
  * @vif_list: linked list of vif instances.
  * @vif_cnt: number of vif instances.
+ * @vif_event: vif event signalling.
  */
 struct brcmf_cfg80211_info {
        struct wiphy *wiphy;
        struct brcmf_cfg80211_conf *conf;
+       struct brcmf_p2p_info p2p;
        struct cfg80211_scan_request *scan_request;
        struct mutex usr_sync;
        struct brcmf_scan_results *bss_list;
@@ -335,6 +407,21 @@ struct brcmf_cfg80211_info {
        u8 *escan_ioctl_buf;
        struct list_head vif_list;
        u8 vif_cnt;
+       struct brcmf_cfg80211_vif_event vif_event;
+       struct completion vif_disabled;
+};
+
+/**
+ * struct brcmf_tlv - tag_ID/length/value_buffer tuple.
+ *
+ * @id: tag identifier.
+ * @len: number of bytes in value buffer.
+ * @data: value buffer.
+ */
+struct brcmf_tlv {
+       u8 id;
+       u8 len;
+       u8 data[1];
 };
 
 static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
@@ -389,4 +476,26 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
 s32 brcmf_cfg80211_up(struct net_device *ndev);
 s32 brcmf_cfg80211_down(struct net_device *ndev);
 
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+                                          enum nl80211_iftype type,
+                                          bool pm_block);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
+
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+                         const u8 *vndr_ie_buf, u32 vndr_ie_len);
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+u16 channel_to_chanspec(struct ieee80211_channel *ch);
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+                                 struct brcmf_cfg80211_vif *vif);
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+                                         u8 action, ulong timeout);
+s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+                               struct net_device *ndev,
+                               bool aborted, bool fw_abort);
+void brcmf_set_mpc(struct net_device *ndev, int mpc);
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+
 #endif                         /* _wl_cfg80211_h_ */
index cdb62b8..10ee314 100644 (file)
@@ -183,8 +183,7 @@ static bool brcms_c_country_valid(const char *ccode)
         * chars.
         */
        if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
-             (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
-             ccode[2] == '\0'))
+             (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A))
                return false;
 
        /*
index 7fc49ca..c6451c6 100644 (file)
@@ -36,6 +36,7 @@
 #include "debug.h"
 
 #define N_TX_QUEUES    4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT    500 /* msec */
 
 /* Flags we support */
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -542,9 +543,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_ARP_FILTER) {
                /* Hardware ARP filter address list or state changed */
-               brcms_err(core, "%s: arp filtering: enabled %s, count %d"
-                         " (implement)\n", __func__, info->arp_filter_enabled ?
-                         "true" : "false", info->arp_addr_cnt);
+               brcms_err(core, "%s: arp filtering: %d addresses"
+                         " (implement)\n", __func__, info->arp_addr_cnt);
        }
 
        if (changed & BSS_CHANGED_QOS) {
@@ -713,16 +713,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
        wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
 }
 
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+       bool result;
+
+       spin_lock_bh(&wl->lock);
+       result = brcms_c_tx_flush_completed(wl->wlc);
+       spin_unlock_bh(&wl->lock);
+       return result;
+}
+
 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct brcms_info *wl = hw->priv;
+       int ret;
 
        no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
 
-       /* wait for packet queue and dma fifos to run empty */
-       spin_lock_bh(&wl->lock);
-       brcms_c_wait_for_tx_completion(wl->wlc, drop);
-       spin_unlock_bh(&wl->lock);
+       ret = wait_event_timeout(wl->tx_flush_wq,
+                                brcms_tx_flush_completed(wl),
+                                msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+       brcms_dbg_mac80211(wl->wlc->hw->d11core,
+                          "ret=%u\n", jiffies_to_msecs(ret));
 }
 
 static const struct ieee80211_ops brcms_ops = {
@@ -777,6 +790,7 @@ void brcms_dpc(unsigned long data)
 
  done:
        spin_unlock_bh(&wl->lock);
+       wake_up(&wl->tx_flush_wq);
 }
 
 /*
@@ -1025,6 +1039,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 
        atomic_set(&wl->callbacks, 0);
 
+       init_waitqueue_head(&wl->tx_flush_wq);
+
        /* setup the bottom half handler */
        tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 
@@ -1614,13 +1630,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
        spin_lock_bh(&wl->lock);
        return blocked;
 }
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
-       spin_unlock_bh(&wl->lock);
-       msleep(ms);
-       spin_lock_bh(&wl->lock);
-}
index 9358bd5..947ccac 100644 (file)
@@ -68,6 +68,8 @@ struct brcms_info {
        spinlock_t lock;        /* per-device perimeter lock */
        spinlock_t isr_lock;    /* per-device ISR synchronization lock */
 
+       /* tx flush */
+       wait_queue_head_t tx_flush_wq;
 
        /* timer related fields */
        atomic_t callbacks;     /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
 extern void brcms_free_timer(struct brcms_timer *timer);
 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
 extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
 extern void brcms_fatal_error(struct brcms_info *wl);
index c26992a..8ef02dc 100644 (file)
 #define DOT11_RTS_LEN                  16
 #define DOT11_CTS_LEN                  10
 #define DOT11_BA_BITMAP_LEN            128
-#define DOT11_MIN_BEACON_PERIOD                1
-#define DOT11_MAX_BEACON_PERIOD                0xFFFF
 #define DOT11_MAXNUMFRAGS              16
 #define DOT11_MAX_FRAG_LEN             2346
 
@@ -1027,7 +1025,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-       bool morepending = false;
        struct bcma_device *core;
        struct tx_status txstatus, *txs;
        u32 s1, s2;
@@ -1041,23 +1038,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
        txs = &txstatus;
        core = wlc_hw->d11core;
        *fatal = false;
-       s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-       while (!(*fatal)
-              && (s1 & TXS_V)) {
-               /* !give others some time to run! */
-               if (n >= max_tx_num) {
-                       morepending = true;
-                       break;
-               }
 
+       while (n < max_tx_num) {
+               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
                if (s1 == 0xffffffff) {
                        brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
                                  __func__);
                        *fatal = true;
                        return false;
                }
-               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+               /* only process when valid */
+               if (!(s1 & TXS_V))
+                       break;
 
+               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
                txs->status = s1 & TXS_STATUS_MASK;
                txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
                txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1059,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
                txs->lasttxtime = 0;
 
                *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+               if (*fatal)
+                       return false;
                n++;
        }
 
-       if (*fatal)
-               return false;
-
-       return morepending;
+       return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -3147,8 +3138,7 @@ void brcms_c_reset(struct brcms_c_info *wlc)
        brcms_c_statsupd(wlc);
 
        /* reset our snapshot of macstat counters */
-       memset((char *)wlc->core->macstat_snapshot, 0,
-               sizeof(struct macstat));
+       memset(wlc->core->macstat_snapshot, 0, sizeof(struct macstat));
 
        brcms_b_reset(wlc->hw);
 }
@@ -4061,7 +4051,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
                return;
        }
 
-       memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
+       memset(&acp_shm, 0, sizeof(struct shm_acparams));
        /* fill in shm ac params struct */
        acp_shm.txop = params->txop;
        /* convert from units of 32us to us for ucode */
@@ -4777,7 +4767,7 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
        struct brcms_bss_info *bi = wlc->default_bss;
 
        /* init default and target BSS with some sane initial values */
-       memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
+       memset(bi, 0, sizeof(*bi));
        bi->beacon_period = BEACON_INTERVAL_DEFAULT;
 
        /* fill the default channel as the first valid channel
@@ -5306,7 +5296,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
                brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
 
        /* Clear rateset override */
-       memset(&rs, 0, sizeof(struct brcms_c_rateset));
+       memset(&rs, 0, sizeof(rs));
 
        switch (gmode) {
        case GMODE_LEGACY_B:
@@ -5529,7 +5519,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
        if (rs->count > BRCMS_NUMRATES)
                return -ENOBUFS;
 
-       memset(&internal_rs, 0, sizeof(struct brcms_c_rateset));
+       memset(&internal_rs, 0, sizeof(internal_rs));
 
        /* Copy only legacy rateset section */
        internal_rs.count = rs->count;
@@ -5555,8 +5545,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
 
 int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
 {
-       if (period < DOT11_MIN_BEACON_PERIOD ||
-           period > DOT11_MAX_BEACON_PERIOD)
+       if (period == 0)
                return -EINVAL;
 
        wlc->default_bss->beacon_period = period;
@@ -5633,7 +5622,7 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
        for (i = 0; i < BRCMS_MAXMODULES; i++) {
                if (!strcmp(wlc->modulecb[i].name, name) &&
                    (wlc->modulecb[i].hdl == hdl)) {
-                       memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
+                       memset(&wlc->modulecb[i], 0, sizeof(wlc->modulecb[i]));
                        return 0;
                }
        }
@@ -6453,10 +6442,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
 
                        if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
                            && (!is_mcs_rate(rspec[k]))) {
-                               brcms_err(wlc->hw->d11core,
-                                         "wl%d: %s: IEEE80211_TX_"
-                                         "RC_MCS != is_mcs_rate(rspec)\n",
-                                         wlc->pub->unit, __func__);
+                               brcms_warn(wlc->hw->d11core,
+                                          "wl%d: %s: IEEE80211_TX_RC_MCS != is_mcs_rate(rspec)\n",
+                                          wlc->pub->unit, __func__);
                        }
 
                        if (is_mcs_rate(rspec[k])) {
@@ -6689,11 +6677,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
                                        (struct ofdm_phy_hdr *) rts_plcp) :
                                rts_plcp[0]) << 8;
        } else {
-               memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
-               memset((char *)&txh->rts_frame, 0,
-                       sizeof(struct ieee80211_rts));
-               memset((char *)txh->RTSPLCPFallback, 0,
-                     sizeof(txh->RTSPLCPFallback));
+               memset(txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+               memset(&txh->rts_frame, 0, sizeof(struct ieee80211_rts));
+               memset(txh->RTSPLCPFallback, 0, sizeof(txh->RTSPLCPFallback));
                txh->RTSDurFallback = 0;
        }
 
@@ -6848,21 +6834,19 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
                                        wlc->fragthresh[queue] =
                                            (u16) newfragthresh;
                        } else {
-                               brcms_err(wlc->hw->d11core,
-                                         "wl%d: %s txop invalid "
-                                         "for rate %d\n",
-                                         wlc->pub->unit, fifo_names[queue],
-                                         rspec2rate(rspec[0]));
+                               brcms_warn(wlc->hw->d11core,
+                                          "wl%d: %s txop invalid for rate %d\n",
+                                          wlc->pub->unit, fifo_names[queue],
+                                          rspec2rate(rspec[0]));
                        }
 
                        if (dur > wlc->edcf_txop[ac])
-                               brcms_err(wlc->hw->d11core,
-                                         "wl%d: %s: %s txop "
-                                         "exceeded phylen %d/%d dur %d/%d\n",
-                                         wlc->pub->unit, __func__,
-                                         fifo_names[queue],
-                                         phylen, wlc->fragthresh[queue],
-                                         dur, wlc->edcf_txop[ac]);
+                               brcms_warn(wlc->hw->d11core,
+                                          "wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
+                                          wlc->pub->unit, __func__,
+                                          fifo_names[queue],
+                                          phylen, wlc->fragthresh[queue],
+                                          dur, wlc->edcf_txop[ac]);
                }
        }
 
@@ -7337,7 +7321,7 @@ brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
        *len = hdr_len + body_len;
 
        /* format PHY and MAC headers */
-       memset((char *)buf, 0, hdr_len);
+       memset(buf, 0, hdr_len);
 
        plcp = (struct cck_phy_hdr *) buf;
 
@@ -7408,9 +7392,13 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
                              struct brcms_bss_cfg *cfg,
                              bool suspend)
 {
-       u16 prb_resp[BCN_TMPL_LEN / 2];
+       u16 *prb_resp;
        int len = BCN_TMPL_LEN;
 
+       prb_resp = kmalloc(BCN_TMPL_LEN, GFP_ATOMIC);
+       if (!prb_resp)
+               return;
+
        /*
         * write the probe response to hardware, or save in
         * the config structure
@@ -7444,6 +7432,8 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
 
        if (suspend)
                brcms_c_enable_mac(wlc);
+
+       kfree(prb_resp);
 }
 
 void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
@@ -7517,25 +7507,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
        return wlc->band->bandunit;
 }
 
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
 {
-       int timeout = 20;
        int i;
 
        /* Kick DMA to send any pending AMPDU */
        for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
                if (wlc->hw->di[i])
-                       dma_txflush(wlc->hw->di[i]);
-
-       /* wait for queue and DMA fifos to run dry */
-       while (brcms_txpktpendtot(wlc) > 0) {
-               brcms_msleep(wlc->wl, 1);
-
-               if (--timeout == 0)
-                       break;
-       }
+                       dma_kick_tx(wlc->hw->di[i]);
 
-       WARN_ON_ONCE(timeout == 0);
+       return !brcms_txpktpendtot(wlc);
 }
 
 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
index 4fb2834..b0f14b7 100644 (file)
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
 extern void brcms_c_scan_start(struct brcms_c_info *wlc);
 extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
 extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
-                                          bool drop);
 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
 
 #endif                         /* _BRCM_PUB_H_ */
index 050ce7c..3630a41 100644 (file)
@@ -572,26 +572,11 @@ il3945_tx_skb(struct il_priv *il,
        il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
 
        /* Total # bytes to be transmitted */
-       len = (u16) skb->len;
-       tx_cmd->len = cpu_to_le16(len);
+       tx_cmd->len = cpu_to_le16((u16) skb->len);
 
-       il_update_stats(il, true, fc, len);
        tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
        tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
 
-       if (!ieee80211_has_morefrags(hdr->frame_control)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
-
-       D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
-       D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-       il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
-       il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
-                         ieee80211_hdrlen(fc));
-
        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
@@ -610,14 +595,8 @@ il3945_tx_skb(struct il_priv *il,
         * within command buffer array. */
        txcmd_phys =
            pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
-       /* we do not map meta data ... so we can safely access address to
-        * provide to unmap command*/
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, len);
-
-       /* Add buffer containing Tx command and MAC(!) header to TFD's
-        * first entry */
-       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+       if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+               goto drop_unlock;
 
        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
@@ -626,10 +605,34 @@ il3945_tx_skb(struct il_priv *il,
                phys_addr =
                    pci_map_single(il->pci_dev, skb->data + hdr_len, len,
                                   PCI_DMA_TODEVICE);
+               if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+                       goto drop_unlock;
+       }
+
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, len);
+       if (len)
                il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
                                               U32_PAD(len));
+
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
        }
 
+       il_update_stats(il, true, fc, skb->len);
+
+       D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+       D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+       il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+       il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+                         ieee80211_hdrlen(fc));
+
        /* Tell device the write idx *just past* this latest filled TFD */
        q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
        il_txq_update_write_ptr(il, txq);
@@ -1001,12 +1004,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
        struct list_head *element;
        struct il_rx_buf *rxb;
        struct page *page;
+       dma_addr_t page_dma;
        unsigned long flags;
        gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
-
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
@@ -1035,26 +1038,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
                        break;
                }
 
+               /* Get physical address of RB/SKB */
+               page_dma =
+                   pci_map_page(il->pci_dev, page, 0,
+                                PAGE_SIZE << il->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+
+               if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+                       __free_pages(page, il->hw_params.rx_page_order);
+                       break;
+               }
+
                spin_lock_irqsave(&rxq->lock, flags);
+
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
+                       pci_unmap_page(il->pci_dev, page_dma,
+                                      PAGE_SIZE << il->hw_params.rx_page_order,
+                                      PCI_DMA_FROMDEVICE);
                        __free_pages(page, il->hw_params.rx_page_order);
                        return;
                }
+
                element = rxq->rx_used.next;
                rxb = list_entry(element, struct il_rx_buf, list);
                list_del(element);
-               spin_unlock_irqrestore(&rxq->lock, flags);
 
                rxb->page = page;
-               /* Get physical address of RB/SKB */
-               rxb->page_dma =
-                   pci_map_page(il->pci_dev, page, 0,
-                                PAGE_SIZE << il->hw_params.rx_page_order,
-                                PCI_DMA_FROMDEVICE);
-
-               spin_lock_irqsave(&rxq->lock, flags);
-
+               rxb->page_dma = page_dma;
                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;
                il->alloc_rxb_page++;
@@ -1284,8 +1295,15 @@ il3945_rx_handle(struct il_priv *il)
                            pci_map_page(il->pci_dev, rxb->page, 0,
                                         PAGE_SIZE << il->hw_params.
                                         rx_page_order, PCI_DMA_FROMDEVICE);
-                       list_add_tail(&rxb->list, &rxq->rx_free);
-                       rxq->free_count++;
+                       if (unlikely(pci_dma_mapping_error(il->pci_dev,
+                                                          rxb->page_dma))) {
+                               __il_free_pages(il, rxb->page);
+                               rxb->page = NULL;
+                               list_add_tail(&rxb->list, &rxq->rx_used);
+                       } else {
+                               list_add_tail(&rxb->list, &rxq->rx_free);
+                               rxq->free_count++;
+                       }
                } else
                        list_add_tail(&rxb->list, &rxq->rx_used);
 
index f1dc040..7941eb3 100644 (file)
@@ -319,6 +319,7 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
        struct list_head *element;
        struct il_rx_buf *rxb;
        struct page *page;
+       dma_addr_t page_dma;
        unsigned long flags;
        gfp_t gfp_mask = priority;
 
@@ -356,33 +357,35 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
                        return;
                }
 
+               /* Get physical address of the RB */
+               page_dma =
+                   pci_map_page(il->pci_dev, page, 0,
+                                PAGE_SIZE << il->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+               if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+                       __free_pages(page, il->hw_params.rx_page_order);
+                       break;
+               }
+
                spin_lock_irqsave(&rxq->lock, flags);
 
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
+                       pci_unmap_page(il->pci_dev, page_dma,
+                                      PAGE_SIZE << il->hw_params.rx_page_order,
+                                      PCI_DMA_FROMDEVICE);
                        __free_pages(page, il->hw_params.rx_page_order);
                        return;
                }
+
                element = rxq->rx_used.next;
                rxb = list_entry(element, struct il_rx_buf, list);
                list_del(element);
 
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
                BUG_ON(rxb->page);
-               rxb->page = page;
-               /* Get physical address of the RB */
-               rxb->page_dma =
-                   pci_map_page(il->pci_dev, page, 0,
-                                PAGE_SIZE << il->hw_params.rx_page_order,
-                                PCI_DMA_FROMDEVICE);
-               /* dma address must be no more than 36 bits */
-               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-               /* and also 256 byte aligned! */
-               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-               spin_lock_irqsave(&rxq->lock, flags);
 
+               rxb->page = page;
+               rxb->page_dma = page_dma;
                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;
                il->alloc_rxb_page++;
@@ -725,6 +728,16 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status.flag |= RX_FLAG_SHORT_GI;
 
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+               /* We know which subframes of an A-MPDU belong
+                * together since we get a single PHY response
+                * from the firmware for all of them.
+                */
+
+               rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status.ampdu_reference = il->_4965.ampdu_ref;
+       }
+
        il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
                                       &rx_status);
 }
@@ -736,6 +749,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
 {
        struct il_rx_pkt *pkt = rxb_addr(rxb);
        il->_4965.last_phy_res_valid = true;
+       il->_4965.ampdu_ref++;
        memcpy(&il->_4965.last_phy_res, pkt->u.raw,
               sizeof(struct il_rx_phy_res));
 }
@@ -1779,8 +1793,7 @@ il4965_tx_skb(struct il_priv *il,
        memcpy(tx_cmd->hdr, hdr, hdr_len);
 
        /* Total # bytes to be transmitted */
-       len = (u16) skb->len;
-       tx_cmd->len = cpu_to_le16(len);
+       tx_cmd->len = cpu_to_le16((u16) skb->len);
 
        if (info->control.hw_key)
                il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
@@ -1790,7 +1803,6 @@ il4965_tx_skb(struct il_priv *il,
 
        il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
 
-       il_update_stats(il, true, fc, len);
        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
@@ -1812,18 +1824,8 @@ il4965_tx_skb(struct il_priv *il,
        txcmd_phys =
            pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
                           PCI_DMA_BIDIRECTIONAL);
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, firstlen);
-       /* Add buffer containing Tx command and MAC(!) header to TFD's
-        * first entry */
-       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
-
-       if (!ieee80211_has_morefrags(hdr->frame_control)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
+       if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+               goto drop_unlock;
 
        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
@@ -1832,8 +1834,24 @@ il4965_tx_skb(struct il_priv *il,
                phys_addr =
                    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
                                   PCI_DMA_TODEVICE);
+               if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+                       goto drop_unlock;
+       }
+
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, firstlen);
+       if (secondlen)
                il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
                                               0, 0);
+
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
        }
 
        scratch_phys =
@@ -1846,6 +1864,8 @@ il4965_tx_skb(struct il_priv *il,
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
 
+       il_update_stats(il, true, fc, skb->len);
+
        D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
        D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
        il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
@@ -4281,8 +4301,16 @@ il4965_rx_handle(struct il_priv *il)
                            pci_map_page(il->pci_dev, rxb->page, 0,
                                         PAGE_SIZE << il->hw_params.
                                         rx_page_order, PCI_DMA_FROMDEVICE);
-                       list_add_tail(&rxb->list, &rxq->rx_free);
-                       rxq->free_count++;
+
+                       if (unlikely(pci_dma_mapping_error(il->pci_dev,
+                                                          rxb->page_dma))) {
+                               __il_free_pages(il, rxb->page);
+                               rxb->page = NULL;
+                               list_add_tail(&rxb->list, &rxq->rx_used);
+                       } else {
+                               list_add_tail(&rxb->list, &rxq->rx_free);
+                               rxq->free_count++;
+                       }
                } else
                        list_add_tail(&rxb->list, &rxq->rx_used);
 
@@ -5711,7 +5739,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
        /* Tell mac80211 our characteristics */
        hw->flags =
            IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
-           IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+           IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
            IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
            IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
        if (il->cfg->sku & IL_SKU_N)
@@ -6573,9 +6601,6 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto out_free_eeprom;
 
-       if (err)
-               goto out_free_eeprom;
-
        /* extract MAC Address */
        il4965_eeprom_get_mac(il, il->addresses[0].addr);
        D_INFO("MAC address: %pM\n", il->addresses[0].addr);
index f3b8e91..e8324b5 100644 (file)
@@ -1183,8 +1183,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
        if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
                return -1;
 
-       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
-           WLAN_HT_CAP_SM_PS_STATIC)
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
                return -1;
 
        /* Need both Tx chains/antennas to support MIMO */
index 5db1171..91eb2d0 100644 (file)
@@ -1748,7 +1748,6 @@ static void
 il4965_post_associate(struct il_priv *il)
 {
        struct ieee80211_vif *vif = il->vif;
-       struct ieee80211_conf *conf = NULL;
        int ret = 0;
 
        if (!vif || !il->is_open)
@@ -1759,8 +1758,6 @@ il4965_post_associate(struct il_priv *il)
 
        il_scan_cancel_timeout(il, 200);
 
-       conf = &il->hw->conf;
-
        il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        il_commit_rxon(il);
 
index 25dd7d2..3b6c994 100644 (file)
@@ -1134,8 +1134,9 @@ struct il_wep_cmd {
 #define RX_RES_PHY_FLAGS_MOD_CCK_MSK           cpu_to_le16(1 << 1)
 #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK    cpu_to_le16(1 << 2)
 #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK       cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0x70
 #define RX_RES_PHY_FLAGS_ANTENNA_POS           4
+#define RX_RES_PHY_FLAGS_AGG_MSK       cpu_to_le16(1 << 7)
 
 #define RX_RES_STATUS_SEC_TYPE_MSK     (0x7 << 8)
 #define RX_RES_STATUS_SEC_TYPE_NONE    (0x0 << 8)
index 1f59860..e006ea8 100644 (file)
@@ -1830,32 +1830,30 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
 {
        struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
        __le32 sta_flags;
-       u8 mimo_ps_mode;
 
        if (!sta || !sta_ht_inf->ht_supported)
                goto done;
 
-       mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
        D_ASSOC("spatial multiplexing power save mode: %s\n",
-               (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
-               (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+               (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+               (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
                "disabled");
 
        sta_flags = il->stations[idx].sta.station_flags;
 
        sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
 
-       switch (mimo_ps_mode) {
-       case WLAN_HT_CAP_SM_PS_STATIC:
+       switch (sta->smps_mode) {
+       case IEEE80211_SMPS_STATIC:
                sta_flags |= STA_FLG_MIMO_DIS_MSK;
                break;
-       case WLAN_HT_CAP_SM_PS_DYNAMIC:
+       case IEEE80211_SMPS_DYNAMIC:
                sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
                break;
-       case WLAN_HT_CAP_SM_PS_DISABLED:
+       case IEEE80211_SMPS_OFF:
                break;
        default:
-               IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+               IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
                break;
        }
 
@@ -3162,18 +3160,23 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
                     idx, il->cmd_queue);
        }
 #endif
-       txq->need_update = 1;
-
-       if (il->ops->txq_update_byte_cnt_tbl)
-               /* Set up entry in queue's byte count circular buffer */
-               il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
 
        phys_addr =
            pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
                           PCI_DMA_BIDIRECTIONAL);
+       if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
+               idx = -ENOMEM;
+               goto out;
+       }
        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, fix_size);
 
+       txq->need_update = 1;
+
+       if (il->ops->txq_update_byte_cnt_tbl)
+               /* Set up entry in queue's byte count circular buffer */
+               il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
+
        il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
                                            U32_PAD(cmd->len));
 
@@ -3181,6 +3184,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
        q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
        il_txq_update_write_ptr(il, txq);
 
+out:
        spin_unlock_irqrestore(&il->hcmd_lock, flags);
        return idx;
 }
index 37fe553..96f2025 100644 (file)
@@ -1356,6 +1356,7 @@ struct il_priv {
                struct {
                        struct il_rx_phy_res last_phy_res;
                        bool last_phy_res_valid;
+                       u32 ampdu_ref;
 
                        struct completion firmware_loading_complete;
 
index 5cf4323..ba319cb 100644 (file)
@@ -43,8 +43,20 @@ config IWLWIFI
          module will be called iwlwifi.
 
 config IWLDVM
-       tristate "Intel Wireless WiFi"
+       tristate "Intel Wireless WiFi DVM Firmware support"
        depends on IWLWIFI
+       help
+         This is the driver supporting the DVM firmware which is
+         currently the only firmware available for existing devices.
+
+config IWLMVM
+       tristate "Intel Wireless WiFi MVM Firmware support"
+       depends on IWLWIFI
+       help
+         This is the driver supporting the MVM firmware which is
+         currently only available for 7000 series devices.
+
+         Say yes if you have such a device.
 
 menu "Debugging Options"
        depends on IWLWIFI
index 170ec33..6c78000 100644 (file)
@@ -5,8 +5,10 @@ iwlwifi-objs           += iwl-drv.o
 iwlwifi-objs           += iwl-debug.o
 iwlwifi-objs           += iwl-notif-wait.o
 iwlwifi-objs           += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs           += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-objs           += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
+iwlwifi-objs           += pcie/7000.o
 
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
@@ -15,5 +17,6 @@ ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
 
 
 obj-$(CONFIG_IWLDVM)   += dvm/
+obj-$(CONFIG_IWLMVM)   += mvm/
 
 CFLAGS_iwl-devtrace.o := -I$(src)
index 33b3ad2..41ec27c 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -338,7 +338,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx,
-                           struct ieee80211_sta_ht_cap *ht_cap);
+                           struct ieee80211_sta *sta);
 
 static inline int iwl_sta_id(struct ieee80211_sta *sta)
 {
index de54713..6468de8 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2349f39..65e920c 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0ca99c1..02c9ebb 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -3897,6 +3897,24 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
        __le64  replay_ctr;
 } __packed;
 
+#define RF_KILL_INDICATOR_FOR_WOWLAN   0x87
+
+/*
+ * REPLY_WOWLAN_GET_STATUS = 0xe5
+ */
+struct iwlagn_wowlan_status {
+       __le64 replay_ctr;
+       __le32 rekey_status;
+       __le32 wakeup_reason;
+       u8 pattern_number;
+       u8 reserved1;
+       __le16 qos_seq_ctr[8];
+       __le16 non_qos_seq_ctr;
+       __le16 reserved2;
+       union iwlagn_all_tsc_rsc tsc_rsc;
+       __le16 reserved3;
+} __packed;
+
 /*
  * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
  */
index 72c74af..20806ca 100644 (file)
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
index 2653a89..71ea775 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index 8c72be3..15cca2e 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index 844a17f..33c7e15 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index b02a853..8749dcf 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index 6ff4660..86ea5f4 100644 (file)
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
index 0353e1c..323e4a3 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -145,14 +145,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
        /* Tell mac80211 our characteristics */
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
                    IEEE80211_HW_AMPDU_AGGREGATION |
-                   IEEE80211_HW_NEED_DTIM_PERIOD |
+                   IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                    IEEE80211_HW_QUEUE_CONTROL |
                    IEEE80211_HW_SUPPORTS_PS |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
-                   IEEE80211_HW_WANT_MONITOR_VIF |
-                   IEEE80211_HW_SCAN_WHILE_IDLE;
+                   IEEE80211_HW_WANT_MONITOR_VIF;
 
        hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
        hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -442,54 +441,154 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
        return ret;
 }
 
+struct iwl_resume_data {
+       struct iwl_priv *priv;
+       struct iwlagn_wowlan_status *cmd;
+       bool valid;
+};
+
+static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
+                                struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_resume_data *resume_data = data;
+       struct iwl_priv *priv = resume_data->priv;
+       u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+       if (len - 4 != sizeof(*resume_data->cmd)) {
+               IWL_ERR(priv, "rx wrong size data\n");
+               return true;
+       }
+       memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
+       resume_data->valid = true;
+
+       return true;
+}
+
 static int iwlagn_mac_resume(struct ieee80211_hw *hw)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        struct ieee80211_vif *vif;
-       unsigned long flags;
-       u32 base, status = 0xffffffff;
-       int ret = -EIO;
+       u32 base;
+       int ret;
+       enum iwl_d3_status d3_status;
+       struct error_table_start {
+               /* cf. struct iwl_error_event_table */
+               u32 valid;
+               u32 error_id;
+       } err_info;
+       struct iwl_notification_wait status_wait;
+       static const u8 status_cmd[] = {
+               REPLY_WOWLAN_GET_STATUS,
+       };
+       struct iwlagn_wowlan_status status_data = {};
+       struct iwl_resume_data resume_data = {
+               .priv = priv,
+               .cmd = &status_data,
+               .valid = false,
+       };
+       struct cfg80211_wowlan_wakeup wakeup = {
+               .pattern_idx = -1,
+       };
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       const struct fw_img *img;
+#endif
 
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->mutex);
 
-       iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
-                         CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+       /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+       vif = ctx->vif;
+
+       ret = iwl_trans_d3_resume(priv->trans, &d3_status);
+       if (ret)
+               goto out_unlock;
+
+       if (d3_status != IWL_D3_STATUS_ALIVE) {
+               IWL_INFO(priv, "Device was reset during suspend\n");
+               goto out_unlock;
+       }
 
        base = priv->device_pointers.error_event_table;
-       if (iwlagn_hw_valid_rtc_data_addr(base)) {
-               spin_lock_irqsave(&priv->trans->reg_lock, flags);
-               if (iwl_trans_grab_nic_access(priv->trans, true)) {
-                       iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
-                       status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-                       iwl_trans_release_nic_access(priv->trans);
-                       ret = 0;
+       if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+               IWL_WARN(priv, "Invalid error table during resume!\n");
+               goto out_unlock;
+       }
+
+       iwl_trans_read_mem_bytes(priv->trans, base,
+                                &err_info, sizeof(err_info));
+
+       if (err_info.valid) {
+               IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
+                        err_info.valid, err_info.error_id);
+               if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+                       wakeup.rfkill_release = true;
+                       ieee80211_report_wowlan_wakeup(vif, &wakeup,
+                                                      GFP_KERNEL);
                }
-               spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+               goto out_unlock;
+       }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-               if (ret == 0) {
-                       const struct fw_img *img;
-
-                       img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
-                       if (!priv->wowlan_sram) {
-                               priv->wowlan_sram =
-                                  kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
-                                               GFP_KERNEL);
-                       }
+       img = &priv->fw->img[IWL_UCODE_WOWLAN];
+       if (!priv->wowlan_sram)
+               priv->wowlan_sram =
+                       kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
+                               GFP_KERNEL);
+
+       if (priv->wowlan_sram)
+               iwl_trans_read_mem(priv->trans, 0x800000,
+                                  priv->wowlan_sram,
+                                  img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+#endif
+
+       /*
+        * This is very strange. The GET_STATUS command is sent but the device
+        * doesn't reply properly, it seems it doesn't close the RBD so one is
+        * always left open ... As a result, we need to send another command
+        * and have to reset the driver afterwards. As we need to switch to
+        * runtime firmware again that'll happen.
+        */
+
+       iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
+                                  ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
+                                  &resume_data);
+
+       iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
+       iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
+       /* an RBD is left open in the firmware now! */
 
-                       if (priv->wowlan_sram)
-                               iwl_trans_read_mem(
-                                     priv->trans, 0x800000,
-                                     priv->wowlan_sram,
-                                     img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+       ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
+       if (ret)
+               goto out_unlock;
+
+       if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
+               u32 reasons = le32_to_cpu(status_data.wakeup_reason);
+               struct cfg80211_wowlan_wakeup *wakeup_report;
+
+               IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);
+
+               if (reasons) {
+                       if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
+                               wakeup.magic_pkt = true;
+                       if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
+                               wakeup.pattern_idx = status_data.pattern_number;
+                       if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+                                      IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
+                               wakeup.disconnect = true;
+                       if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
+                               wakeup.gtk_rekey_failure = true;
+                       if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
+                               wakeup.eap_identity_req = true;
+                       if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
+                               wakeup.four_way_handshake = true;
+                       wakeup_report = &wakeup;
+               } else {
+                       wakeup_report = NULL;
                }
-#endif
-       }
 
-       /* we'll clear ctx->vif during iwlagn_prepare_restart() */
-       vif = ctx->vif;
+               ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+       }
 
        priv->wowlan = false;
 
@@ -499,6 +598,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
        iwl_connection_init_rx_config(priv, ctx);
        iwlagn_set_rxon_chain(priv, ctx);
 
+ out_unlock:
        mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
 
@@ -1154,6 +1254,7 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
 }
 
 static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
                                     enum ieee80211_rssi_event rssi_event)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
index a64f361..b9e3517 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -353,11 +353,8 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
                ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
 
        /* Make sure device is powered up for SRAM reads */
-       spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
-       if (!iwl_trans_grab_nic_access(priv->trans, false)) {
-               spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+       if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags))
                return;
-       }
 
        /* Set starting address; reads will auto-increment */
        iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -388,8 +385,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
                }
        }
        /* Allow device to power down */
-       iwl_trans_release_nic_access(priv->trans);
-       spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+       iwl_trans_release_nic_access(priv->trans, &reg_flags);
 }
 
 static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -1717,9 +1713,8 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
        ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
 
        /* Make sure device is powered up for SRAM reads */
-       spin_lock_irqsave(&trans->reg_lock, reg_flags);
-       if (!iwl_trans_grab_nic_access(trans, false))
-               goto out_unlock;
+       if (!iwl_trans_grab_nic_access(trans, false, &reg_flags))
+               return pos;
 
        /* Set starting address; reads will auto-increment */
        iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -1757,9 +1752,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
        }
 
        /* Allow device to power down */
-       iwl_trans_release_nic_access(trans);
-out_unlock:
-       spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
+       iwl_trans_release_nic_access(trans, &reg_flags);
        return pos;
 }
 
@@ -1991,13 +1984,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
        struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
        /* SKU Control */
-       iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
-                         CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
-                         CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
-                         (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
-                               CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
-                         (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
-                               CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+       iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+                               CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+                               CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+                               (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+                                       CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+                               (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+                                       CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
 
        /* write radio config values to register */
        if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
@@ -2009,10 +2002,11 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
                        priv->nvm_data->radio_cfg_dash <<
                                CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
 
-               iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
-                                 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
-                                 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
-                                 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
+               iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+                                       CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+                                       CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+                                       CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
+                                       reg_val);
 
                IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
                         priv->nvm_data->radio_cfg_type,
index 518cf37..bd69018 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
index a2cee7f..7b03e13 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
index f3dd0da..abe3042 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -411,8 +411,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
         * BT traffic, as they would just be disrupted by BT.
         */
        if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
-               IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
-                       priv->bt_traffic_load);
+               IWL_DEBUG_COEX(priv,
+                              "BT traffic (%d), no aggregation allowed\n",
+                              priv->bt_traffic_load);
                return ret;
        }
 
@@ -1288,8 +1289,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
        if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
                return -1;
 
-       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
-                                               == WLAN_HT_CAP_SM_PS_STATIC)
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
                return -1;
 
        /* Need both Tx chains/antennas to support MIMO */
@@ -1304,7 +1304,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
        tbl->max_search = IWL_MAX_SEARCH;
        rate_mask = lq_sta->active_mimo2_rate;
 
-       if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+       if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
                tbl->is_ht40 = 1;
        else
                tbl->is_ht40 = 0;
@@ -1344,8 +1344,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
        if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
                return -1;
 
-       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
-                                               == WLAN_HT_CAP_SM_PS_STATIC)
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
                return -1;
 
        /* Need both Tx chains/antennas to support MIMO */
@@ -1360,7 +1359,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
        tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
        rate_mask = lq_sta->active_mimo3_rate;
 
-       if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+       if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
                tbl->is_ht40 = 1;
        else
                tbl->is_ht40 = 0;
@@ -1409,7 +1408,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
        tbl->max_search = IWL_MAX_SEARCH;
        rate_mask = lq_sta->active_siso_rate;
 
-       if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+       if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
                tbl->is_ht40 = 1;
        else
                tbl->is_ht40 = 0;
index ad3aea8..5d83cab 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index cac4f37..a4eed20 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portionhelp of the ieee80211 subsystem header files.
@@ -790,7 +790,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-       ieee80211_rx(priv->hw, skb);
+       ieee80211_rx_ni(priv->hw, skb);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
index 9a891e6..23be948 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1545,10 +1545,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
                                bss_conf->bssid);
        }
 
-       if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
-           priv->beacon_ctx) {
+       if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
                if (iwlagn_update_beacon(priv, vif))
-                       IWL_ERR(priv, "Error sending IBSS beacon\n");
+                       IWL_ERR(priv, "Error updating beacon\n");
        }
 
        mutex_unlock(&priv->mutex);
index 610ed22..3a4aa52 100644 (file)
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
index bdba954..94ef338 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -77,7 +77,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
        IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
                       sta_id);
 
-       spin_lock(&priv->sta_lock);
+       spin_lock_bh(&priv->sta_lock);
 
        switch (add_sta_resp->status) {
        case ADD_STA_SUCCESS_MSK:
@@ -119,7 +119,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
                       priv->stations[sta_id].sta.mode ==
                       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
                       addsta->sta.addr);
-       spin_unlock(&priv->sta_lock);
+       spin_unlock_bh(&priv->sta_lock);
 
        return ret;
 }
@@ -173,7 +173,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx,
-                           struct ieee80211_sta_ht_cap *ht_cap)
+                           struct ieee80211_sta *sta)
 {
        if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
                return false;
@@ -183,20 +183,11 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
                return false;
 #endif
 
-       /*
-        * Remainder of this function checks ht_cap, but if it's
-        * NULL then we can do HT40 (special case for RXON)
-        */
-       if (!ht_cap)
+       /* special case for RXON */
+       if (!sta)
                return true;
 
-       if (!ht_cap->ht_supported)
-               return false;
-
-       if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-               return false;
-
-       return true;
+       return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
 }
 
 static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -205,7 +196,6 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
                                  __le32 *flags, __le32 *mask)
 {
        struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
-       u8 mimo_ps_mode;
 
        *mask = STA_FLG_RTS_MIMO_PROT_MSK |
                STA_FLG_MIMO_DIS_MSK |
@@ -217,26 +207,24 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
        if (!sta || !sta_ht_inf->ht_supported)
                return;
 
-       mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
-
        IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
                        sta->addr,
-                       (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+                       (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
                        "static" :
-                       (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+                       (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
                        "dynamic" : "disabled");
 
-       switch (mimo_ps_mode) {
-       case WLAN_HT_CAP_SM_PS_STATIC:
+       switch (sta->smps_mode) {
+       case IEEE80211_SMPS_STATIC:
                *flags |= STA_FLG_MIMO_DIS_MSK;
                break;
-       case WLAN_HT_CAP_SM_PS_DYNAMIC:
+       case IEEE80211_SMPS_DYNAMIC:
                *flags |= STA_FLG_RTS_MIMO_PROT_MSK;
                break;
-       case WLAN_HT_CAP_SM_PS_DISABLED:
+       case IEEE80211_SMPS_OFF:
                break;
        default:
-               IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+               IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
                break;
        }
 
@@ -246,7 +234,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
        *flags |= cpu_to_le32(
                (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
 
-       if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+       if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
                *flags |= STA_FLG_HT40_EN_MSK;
 }
 
index 57b918c..dc6f965 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b28cfc8..67e2e13 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -185,10 +185,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
                        priv->thermal_throttle.ct_kill_toggle = true;
                }
                iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
-               spin_lock_irqsave(&priv->trans->reg_lock, flags);
-               if (iwl_trans_grab_nic_access(priv->trans, false))
-                       iwl_trans_release_nic_access(priv->trans);
-               spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+               if (iwl_trans_grab_nic_access(priv->trans, false, &flags))
+                       iwl_trans_release_nic_access(priv->trans, &flags);
 
                /* Reschedule the ct_kill timer to occur in
                 * CT_KILL_EXIT_DURATION seconds to ensure we get a
index 44c7c8f..9356c4b 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
index 6b01fc1..d1dccb3 100644 (file)
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -1117,7 +1117,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
        sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
                IWLAGN_TX_RES_RA_POS;
 
-       spin_lock(&priv->sta_lock);
+       spin_lock_bh(&priv->sta_lock);
 
        if (is_agg)
                iwl_rx_reply_tx_agg(priv, tx_resp);
@@ -1145,6 +1145,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        next_reclaimed = ssn;
                }
 
+               if (tid != IWL_TID_NON_QOS) {
+                       priv->tid_data[sta_id][tid].next_reclaimed =
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                                 next_reclaimed);
+               }
+
                iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
                iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1195,16 +1202,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        if (!is_agg)
                                iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
-                       /*
-                        * W/A for FW bug - the seq_ctl isn't updated when the
-                        * queues are flushed. Fetch it from the packet itself
-                        */
-                       if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
-                               next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
-                               next_reclaimed =
-                                       SEQ_TO_SN(next_reclaimed + 0x10);
-                       }
-
                        is_offchannel_skb =
                                (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
                        freed++;
@@ -1239,11 +1236,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                           le16_to_cpu(tx_resp->seq_ctl));
 
        iwl_check_abort_status(priv, tx_resp->frame_count, status);
-       spin_unlock(&priv->sta_lock);
+       spin_unlock_bh(&priv->sta_lock);
 
        while (!skb_queue_empty(&skbs)) {
                skb = __skb_dequeue(&skbs);
-               ieee80211_tx_status(priv->hw, skb);
+               ieee80211_tx_status_ni(priv->hw, skb);
        }
 
        if (is_offchannel_skb)
@@ -1290,12 +1287,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        tid = ba_resp->tid;
        agg = &priv->tid_data[sta_id][tid].agg;
 
-       spin_lock(&priv->sta_lock);
+       spin_lock_bh(&priv->sta_lock);
 
        if (unlikely(!agg->wait_for_ba)) {
                if (unlikely(ba_resp->bitmap))
                        IWL_ERR(priv, "Received BA when not expected\n");
-               spin_unlock(&priv->sta_lock);
+               spin_unlock_bh(&priv->sta_lock);
                return 0;
        }
 
@@ -1309,7 +1306,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                IWL_DEBUG_TX_QUEUES(priv,
                                    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
                                    scd_flow, sta_id, tid, agg->txq_id);
-               spin_unlock(&priv->sta_lock);
+               spin_unlock_bh(&priv->sta_lock);
                return 0;
        }
 
@@ -1378,11 +1375,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                }
        }
 
-       spin_unlock(&priv->sta_lock);
+       spin_unlock_bh(&priv->sta_lock);
 
        while (!skb_queue_empty(&reclaimed_skbs)) {
                skb = __skb_dequeue(&reclaimed_skbs);
-               ieee80211_tx_status(priv->hw, skb);
+               ieee80211_tx_status_ni(priv->hw, skb);
        }
 
        return 0;
index ebec13a..736fe9b 100644 (file)
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
index 7960a52..e9975c5 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 864219d..743b483 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,7 @@ enum iwl_device_family {
        IWL_DEVICE_FAMILY_6030,
        IWL_DEVICE_FAMILY_6050,
        IWL_DEVICE_FAMILY_6150,
+       IWL_DEVICE_FAMILY_7000,
 };
 
 /*
index b419a1e..df3463a 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 42b20b0..8cf5db7 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
@@ -116,6 +116,7 @@ do {                                                                \
 #define IWL_DL_HCMD            0x00000004
 #define IWL_DL_STATE           0x00000008
 /* 0x000000F0 - 0x00000010 */
+#define IWL_DL_TE              0x00000020
 #define IWL_DL_EEPROM          0x00000040
 #define IWL_DL_RADIO           0x00000080
 /* 0x00000F00 - 0x00000100 */
@@ -156,6 +157,7 @@ do {                                                                \
 #define IWL_DEBUG_LED(p, f, a...)      IWL_DEBUG(p, IWL_DL_LED, f, ## a)
 #define IWL_DEBUG_WEP(p, f, a...)      IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
 #define IWL_DEBUG_HC(p, f, a...)       IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_TE(p, f, a...)       IWL_DEBUG(p, IWL_DL_TE, f, ## a)
 #define IWL_DEBUG_EEPROM(d, f, a...)   IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
 #define IWL_DEBUG_CALIB(p, f, a...)    IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
 #define IWL_DEBUG_FW(p, f, a...)       IWL_DEBUG(p, IWL_DL_FW, f, ## a)
index 70191dd..8f61c71 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index dc7e26b..9a0f45e 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index d3549f4..6f228bb 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -139,8 +139,10 @@ struct iwl_drv {
 #endif
 };
 
-#define DVM_OP_MODE    0
-#define MVM_OP_MODE    1
+enum {
+       DVM_OP_MODE =   0,
+       MVM_OP_MODE =   1,
+};
 
 /* Protects the table contents, i.e. the ops pointer & drv list */
 static struct mutex iwlwifi_opmode_table_mtx;
@@ -149,8 +151,8 @@ static struct iwlwifi_opmode_table {
        const struct iwl_op_mode_ops *ops;      /* pointer to op_mode ops */
        struct list_head drv;           /* list of devices using this op_mode */
 } iwlwifi_opmode_table[] = {           /* ops set when driver is initialized */
-       { .name = "iwldvm", .ops = NULL },
-       { .name = "iwlmvm", .ops = NULL },
+       [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
+       [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
 };
 
 /*
@@ -268,7 +270,7 @@ struct fw_sec_parsing {
  */
 struct iwl_tlv_calib_data {
        __le32 ucode_type;
-       __le64 calib;
+       struct iwl_tlv_calib_ctrl calib;
 } __packed;
 
 struct iwl_firmware_pieces {
@@ -358,7 +360,11 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
                        ucode_type);
                return -EINVAL;
        }
-       drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib);
+       drv->fw.default_calib[ucode_type].flow_trigger =
+               def_calib->calib.flow_trigger;
+       drv->fw.default_calib[ucode_type].event_trigger =
+               def_calib->calib.event_trigger;
+
        return 0;
 }
 
@@ -959,7 +965,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        release_firmware(ucode_raw);
 
        mutex_lock(&iwlwifi_opmode_table_mtx);
-       op = &iwlwifi_opmode_table[DVM_OP_MODE];
+       if (fw->mvm_fw)
+               op = &iwlwifi_opmode_table[MVM_OP_MODE];
+       else
+               op = &iwlwifi_opmode_table[DVM_OP_MODE];
 
        /* add this device to the list of devices using this op_mode */
        list_add_tail(&drv->list, &op->drv);
index 285de5f..594a5c7 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@
 /* for all modules */
 #define DRV_NAME        "iwlwifi"
 #define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT  "Copyright(c) 2003-2012 Intel Corporation"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2013 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
 
index 4719866..034f2ff 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -703,9 +703,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        return n_channels;
 }
 
-static int iwl_init_sband_channels(struct iwl_nvm_data *data,
-                                  struct ieee80211_supported_band *sband,
-                                  int n_channels, enum ieee80211_band band)
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+                           struct ieee80211_supported_band *sband,
+                           int n_channels, enum ieee80211_band band)
 {
        struct ieee80211_channel *chan = &data->channels[0];
        int n = 0, idx = 0;
@@ -728,10 +728,10 @@ static int iwl_init_sband_channels(struct iwl_nvm_data *data,
 #define MAX_BIT_RATE_40_MHZ    150 /* Mbps */
 #define MAX_BIT_RATE_20_MHZ    72 /* Mbps */
 
-static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
-                                struct iwl_nvm_data *data,
-                                struct ieee80211_sta_ht_cap *ht_info,
-                                enum ieee80211_band band)
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+                         struct iwl_nvm_data *data,
+                         struct ieee80211_sta_ht_cap *ht_info,
+                         enum ieee80211_band band)
 {
        int max_bit_rate = 0;
        u8 rx_chains;
index 555f0eb..683fe6a 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -126,4 +126,13 @@ static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
 int iwl_nvm_check_version(struct iwl_nvm_data *data,
                          struct iwl_trans *trans);
 
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+                           struct ieee80211_supported_band *sband,
+                           int n_channels, enum ieee80211_band band);
+
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+                         struct iwl_nvm_data *data,
+                         struct ieee80211_sta_ht_cap *ht_info,
+                         enum ieee80211_band band);
+
 #endif /* __iwl_eeprom_parse_h__ */
index 27c7da3..ef4806f 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1337c9d..b2588c5 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c646a90..f5592fb 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -414,6 +414,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
  *     uCode/driver must write "1" in order to clear this flag
  */
 #define FH_TSSR_TX_ERROR_REG           (FH_TSSR_LOWER_BOUND + 0x018)
+#define FH_TSSR_TX_MSG_CONFIG_REG      (FH_TSSR_LOWER_BOUND + 0x008)
 
 #define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
 
index e715640..90873ec 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 715291e..b545178 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -139,6 +139,19 @@ struct fw_img {
 #define IWL_UCODE_API(ver)     (((ver) & 0x0000FF00) >> 8)
 #define IWL_UCODE_SERIAL(ver)  ((ver) & 0x000000FF)
 
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ *             flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ *             event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+       __le32 flow_trigger;
+       __le32 event_trigger;
+} __packed;
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -153,6 +166,7 @@ struct fw_img {
  * @inst_evtlog_ptr: event log offset for runtime ucode.
  * @inst_evtlog_size: event log size for runtime ucode.
  * @inst_errlog_ptr: error log offfset for runtime ucode.
+ * @mvm_fw: indicates this is MVM firmware
  */
 struct iwl_fw {
        u32 ucode_ver;
@@ -168,7 +182,7 @@ struct iwl_fw {
        u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
        u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
 
-       u64 default_calib[IWL_UCODE_TYPE_MAX];
+       struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
        u32 phy_config;
 
        bool mvm_fw;
index bff3ac9..276410d 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
 
 #define IWL_POLL_INTERVAL 10   /* microseconds */
 
-void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
-       iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
-}
-
-void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
-       iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
-}
-
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       __iwl_set_bit(trans, reg, mask);
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bit);
-
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       __iwl_clear_bit(trans, reg, mask);
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_clear_bit);
-
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
-       unsigned long flags;
-       u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       WARN_ON_ONCE(value & ~mask);
-#endif
-
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       v = iwl_read32(trans, reg);
-       v &= ~mask;
-       v |= value;
-       iwl_write32(trans, reg, v);
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
-
 int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
                 u32 bits, u32 mask, int timeout)
 {
@@ -103,13 +55,10 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
 {
        u32 value = 0x5a5a5a5a;
        unsigned long flags;
-
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                value = iwl_read32(trans, reg);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
 
        return value;
 }
@@ -119,12 +68,10 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, reg, value);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_write_direct32);
 
@@ -162,12 +109,10 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
        unsigned long flags;
        u32 val = 0x5a5a5a5a;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                val = __iwl_read_prph(trans, ofs);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
        return val;
 }
 EXPORT_SYMBOL_GPL(iwl_read_prph);
@@ -176,12 +121,10 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                __iwl_write_prph(trans, ofs, val);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_write_prph);
 
@@ -189,13 +132,11 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                __iwl_write_prph(trans, ofs,
                                 __iwl_read_prph(trans, ofs) | mask);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
 
@@ -204,13 +145,11 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                __iwl_write_prph(trans, ofs,
                                 (__iwl_read_prph(trans, ofs) & mask) | bits);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
 
@@ -219,12 +158,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
        unsigned long flags;
        u32 val;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                val = __iwl_read_prph(trans, ofs);
                __iwl_write_prph(trans, ofs, (val & ~mask));
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
index dc47806..fd9f5b9 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
@@ -51,12 +51,15 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
        return val;
 }
 
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+       iwl_trans_set_bits_mask(trans, reg, mask, mask);
+}
 
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
+static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+       iwl_trans_set_bits_mask(trans, reg, mask, 0);
+}
 
 int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
                 u32 bits, u32 mask, int timeout);
index d9a86d6..e5e3a79 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c61f207..c3affbc 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8215231..c2ce764 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
new file mode 100644 (file)
index 0000000..a70213b
--- /dev/null
@@ -0,0 +1,346 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+
+/* NVM offsets (in words) definitions */
+enum wkp_nvm_offsets {
+       /* NVM HW-Section offset (in words) definitions */
+       HW_ADDR = 0x15,
+
+/* NVM SW-Section offset (in words) definitions */
+       NVM_SW_SECTION = 0x1C0,
+       NVM_VERSION = 0,
+       RADIO_CFG = 1,
+       SKU = 2,
+       N_HW_ADDRS = 3,
+       NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
+
+/* NVM calibration section offset (in words) definitions */
+       NVM_CALIB_SECTION = 0x2B8,
+       XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+};
+
+/* SKU Capabilities (actual values from NVM definition) */
+enum nvm_sku_bits {
+       NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
+       NVM_SKU_CAP_BAND_52GHZ  = BIT(1),
+       NVM_SKU_CAP_11N_ENABLE  = BIT(2),
+};
+
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
+#define NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
+#define NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
+#define NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+/*
+ * These are the channel numbers in the order that they are stored in the NVM
+ */
+static const u8 iwl_nvm_channels[] = {
+       /* 2.4 GHz */
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+       /* 5 GHz */
+       36, 40, 44 , 48, 52, 56, 60, 64,
+       100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+       149, 153, 157, 161, 165
+};
+
+#define IWL_NUM_CHANNELS       ARRAY_SIZE(iwl_nvm_channels)
+#define NUM_2GHZ_CHANNELS      14
+#define FIRST_2GHZ_HT_MINUS    5
+#define LAST_2GHZ_HT_PLUS      9
+#define LAST_5GHZ_HT           161
+
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+       { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+       { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+         .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+       { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+         .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+       { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+         .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+       { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+       { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+       { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+       { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+       { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+       { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+       { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+       { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS  0
+#define N_RATES_24     ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS  4
+#define N_RATES_52     (N_RATES_24 - RATES_52_OFFS)
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ */
+enum iwl_nvm_channel_flags {
+       NVM_CHANNEL_VALID = BIT(0),
+       NVM_CHANNEL_IBSS = BIT(1),
+       NVM_CHANNEL_ACTIVE = BIT(3),
+       NVM_CHANNEL_RADAR = BIT(4),
+       NVM_CHANNEL_DFS = BIT(7),
+       NVM_CHANNEL_WIDE = BIT(8),
+       NVM_CHANNEL_40MHZ = BIT(9),
+};
+
+#define CHECK_AND_PRINT_I(x)   \
+       ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+                               struct iwl_nvm_data *data,
+                               const __le16 * const nvm_ch_flags)
+{
+       int ch_idx;
+       int n_channels = 0;
+       struct ieee80211_channel *channel;
+       u16 ch_flags;
+       bool is_5ghz;
+
+       for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+               ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+               if (!(ch_flags & NVM_CHANNEL_VALID)) {
+                       IWL_DEBUG_EEPROM(dev,
+                                        "Ch. %d Flags %x [%sGHz] - No traffic\n",
+                                        iwl_nvm_channels[ch_idx],
+                                        ch_flags,
+                                        (ch_idx >= NUM_2GHZ_CHANNELS) ?
+                                        "5.2" : "2.4");
+                       continue;
+               }
+
+               channel = &data->channels[n_channels];
+               n_channels++;
+
+               channel->hw_value = iwl_nvm_channels[ch_idx];
+               channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+               channel->center_freq =
+                       ieee80211_channel_to_frequency(
+                               channel->hw_value, channel->band);
+
+               /* TODO: Need to be dependent to the NVM */
+               channel->flags = IEEE80211_CHAN_NO_HT40;
+               if (ch_idx < NUM_2GHZ_CHANNELS &&
+                   (ch_flags & NVM_CHANNEL_40MHZ)) {
+                       if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+                               channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+                       if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+                               channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+               } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+                          (ch_flags & NVM_CHANNEL_40MHZ)) {
+                       if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                               channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+                       else
+                               channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+               }
+
+               if (!(ch_flags & NVM_CHANNEL_IBSS))
+                       channel->flags |= IEEE80211_CHAN_NO_IBSS;
+
+               if (!(ch_flags & NVM_CHANNEL_ACTIVE))
+                       channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+               if (ch_flags & NVM_CHANNEL_RADAR)
+                       channel->flags |= IEEE80211_CHAN_RADAR;
+
+               /* Initialize regulatory-based run-time data */
+
+               /* TODO: read the real value from the NVM */
+               channel->max_power = 0;
+               is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+               IWL_DEBUG_EEPROM(dev,
+                                "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+                                channel->hw_value,
+                                is_5ghz ? "5.2" : "2.4",
+                                CHECK_AND_PRINT_I(VALID),
+                                CHECK_AND_PRINT_I(IBSS),
+                                CHECK_AND_PRINT_I(ACTIVE),
+                                CHECK_AND_PRINT_I(RADAR),
+                                CHECK_AND_PRINT_I(WIDE),
+                                CHECK_AND_PRINT_I(DFS),
+                                ch_flags,
+                                channel->max_power,
+                                ((ch_flags & NVM_CHANNEL_IBSS) &&
+                                 !(ch_flags & NVM_CHANNEL_RADAR))
+                                       ? "" : "not ");
+       }
+
+       return n_channels;
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+                           struct iwl_nvm_data *data, const __le16 *nvm_sw)
+{
+       int n_channels = iwl_init_channel_map(dev, cfg, data,
+                       &nvm_sw[NVM_CHANNELS]);
+       int n_used = 0;
+       struct ieee80211_supported_band *sband;
+
+       sband = &data->bands[IEEE80211_BAND_2GHZ];
+       sband->band = IEEE80211_BAND_2GHZ;
+       sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+       sband->n_bitrates = N_RATES_24;
+       n_used += iwl_init_sband_channels(data, sband, n_channels,
+                                         IEEE80211_BAND_2GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+       sband = &data->bands[IEEE80211_BAND_5GHZ];
+       sband->band = IEEE80211_BAND_5GHZ;
+       sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+       sband->n_bitrates = N_RATES_52;
+       n_used += iwl_init_sband_channels(data, sband, n_channels,
+                                         IEEE80211_BAND_5GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+       if (n_channels != n_used)
+               IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
+                           n_used, n_channels);
+}
+
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+                  const __le16 *nvm_hw, const __le16 *nvm_sw,
+                  const __le16 *nvm_calib)
+{
+       struct iwl_nvm_data *data;
+       u8 hw_addr[ETH_ALEN];
+       u16 radio_cfg, sku;
+
+       data = kzalloc(sizeof(*data) +
+                      sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+                      GFP_KERNEL);
+       if (!data)
+               return NULL;
+
+       data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+
+       radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
+       data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+       data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+       data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+       data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+       data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+       data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+       sku = le16_to_cpup(nvm_sw + SKU);
+       data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+       data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+       data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+       if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+               data->sku_cap_11n_enable = false;
+
+       /* check overrides (some devices have wrong NVM) */
+       if (cfg->valid_tx_ant)
+               data->valid_tx_ant = cfg->valid_tx_ant;
+       if (cfg->valid_rx_ant)
+               data->valid_rx_ant = cfg->valid_rx_ant;
+
+       if (!data->valid_tx_ant || !data->valid_rx_ant) {
+               IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+                           data->valid_tx_ant, data->valid_rx_ant);
+               kfree(data);
+               return NULL;
+       }
+
+       data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+
+       data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+       data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+
+       /* The byte order is little endian 16 bit, meaning 214365 */
+       memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
+       data->hw_addr[0] = hw_addr[1];
+       data->hw_addr[1] = hw_addr[0];
+       data->hw_addr[2] = hw_addr[3];
+       data->hw_addr[3] = hw_addr[2];
+       data->hw_addr[4] = hw_addr[5];
+       data->hw_addr[5] = hw_addr[4];
+
+       iwl_init_sbands(dev, cfg, data, nvm_sw);
+
+       data->calib_version = 255;   /* TODO:
+                                       this value will prevent some checks from
+                                       failing, we need to check if this
+                                       field is still needed, and if it does,
+                                       where is it in the NVM*/
+
+       return data;
+}
+EXPORT_SYMBOL_GPL(iwl_parse_nvm_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
new file mode 100644 (file)
index 0000000..b2692bd
--- /dev/null
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_nvm_parse_h__
+#define __iwl_nvm_parse_h__
+
+#include "iwl-eeprom-parse.h"
+
+/**
+ * iwl_parse_nvm_data - parse NVM data and return values
+ *
+ * This function parses all NVM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+                  const __le16 *nvm_hw, const __le16 *nvm_sw,
+                  const __le16 *nvm_calib);
+
+#endif /* __iwl_nvm_parse_h__ */
index c8d9b95..4a68001 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,8 @@
 #ifndef __iwl_op_mode_h__
 #define __iwl_op_mode_h__
 
+#include <linux/debugfs.h>
+
 struct iwl_op_mode;
 struct iwl_trans;
 struct sk_buff;
@@ -111,13 +113,13 @@ struct iwl_cfg;
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *     HCMD the this Rx responds to.
- *     Must be atomic and called with BH disabled.
+ *     This callback may sleep, it is called from a threaded IRQ handler.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
  *     Must be atomic and called with BH disabled.
  * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
- *     the radio is killed. Must be atomic.
+ *     the radio is killed. May sleep.
  * @free_skb: allows the transport layer to free skbs that haven't been
  *     reclaimed by the op_mode. This can happen when the driver is freed and
  *     there are Tx packets pending in the transport layer.
@@ -128,8 +130,7 @@ struct iwl_cfg;
  *     called with BH disabled.
  * @nic_config: configure NIC, called before firmware is started.
  *     May sleep
- * @wimax_active: invoked when WiMax becomes active.  Must be atomic and called
- *     with BH disabled.
+ * @wimax_active: invoked when WiMax becomes active. May sleep
  */
 struct iwl_op_mode_ops {
        struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -176,6 +177,7 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
                                  struct iwl_rx_cmd_buffer *rxb,
                                  struct iwl_device_cmd *cmd)
 {
+       might_sleep();
        return op_mode->ops->rx(op_mode, rxb, cmd);
 }
 
@@ -194,6 +196,7 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
 static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
                                          bool state)
 {
+       might_sleep();
        op_mode->ops->hw_rf_kill(op_mode, state);
 }
 
@@ -221,6 +224,7 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
 
 static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
 {
+       might_sleep();
        op_mode->ops->wimax_active(op_mode);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644 (file)
index 0000000..14fc8d3
--- /dev/null
@@ -0,0 +1,514 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "iwl-phy-db.h"
+#include "iwl-debug.h"
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+#define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
+#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_TXP_CH_GROUPS  9
+
+struct iwl_phy_db_entry {
+       u16     size;
+       u8      *data;
+};
+
+/**
+ * struct iwl_phy_db - stores phy configuration and calibration data.
+ *
+ * @cfg: phy configuration.
+ * @calib_nch: non channel specific calibration data.
+ * @calib_ch: channel specific calibration data.
+ * @calib_ch_group_papd: calibration data related to papd channel group.
+ * @calib_ch_group_txp: calibration data related to tx power channel group.
+ */
+struct iwl_phy_db {
+       struct iwl_phy_db_entry cfg;
+       struct iwl_phy_db_entry calib_nch;
+       struct iwl_phy_db_entry calib_ch;
+       struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
+       struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+
+       u32 channel_num;
+       u32 channel_size;
+
+       struct iwl_trans *trans;
+};
+
+enum iwl_phy_db_section_type {
+       IWL_PHY_DB_CFG = 1,
+       IWL_PHY_DB_CALIB_NCH,
+       IWL_PHY_DB_CALIB_CH,
+       IWL_PHY_DB_CALIB_CHG_PAPD,
+       IWL_PHY_DB_CALIB_CHG_TXP,
+       IWL_PHY_DB_MAX
+};
+
+#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+
+/*
+ * phy db - configure operational ucode
+ */
+struct iwl_phy_db_cmd {
+       __le16 type;
+       __le16 length;
+       u8 data[];
+} __packed;
+
+/* for parsing of tx power channel group data that comes from the firmware */
+struct iwl_phy_db_chg_txp {
+       __le32 space;
+       __le16 max_channel_idx;
+} __packed;
+
+/*
+ * phy db - Receive phy db chunk after calibrations
+ */
+struct iwl_calib_res_notif_phy_db {
+       __le16 type;
+       __le16 length;
+       u8 data[];
+} __packed;
+
+#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
+static inline void iwl_phy_db_test_pic(__le32 pic)
+{
+       WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
+}
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
+{
+       struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
+                                           GFP_KERNEL);
+
+       if (!phy_db)
+               return phy_db;
+
+       phy_db->trans = trans;
+
+       /* TODO: add default values of the phy db. */
+       return phy_db;
+}
+EXPORT_SYMBOL(iwl_phy_db_init);
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+static struct iwl_phy_db_entry *
+iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
+                      enum iwl_phy_db_section_type type,
+                      u16 chg_id)
+{
+       if (!phy_db || type >= IWL_PHY_DB_MAX)
+               return NULL;
+
+       switch (type) {
+       case IWL_PHY_DB_CFG:
+               return &phy_db->cfg;
+       case IWL_PHY_DB_CALIB_NCH:
+               return &phy_db->calib_nch;
+       case IWL_PHY_DB_CALIB_CH:
+               return &phy_db->calib_ch;
+       case IWL_PHY_DB_CALIB_CHG_PAPD:
+               if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+                       return NULL;
+               return &phy_db->calib_ch_group_papd[chg_id];
+       case IWL_PHY_DB_CALIB_CHG_TXP:
+               if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
+                       return NULL;
+               return &phy_db->calib_ch_group_txp[chg_id];
+       default:
+               return NULL;
+       }
+       return NULL;
+}
+
+static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
+                                   enum iwl_phy_db_section_type type,
+                                   u16 chg_id)
+{
+       struct iwl_phy_db_entry *entry =
+                               iwl_phy_db_get_section(phy_db, type, chg_id);
+       if (!entry)
+               return;
+
+       kfree(entry->data);
+       entry->data = NULL;
+       entry->size = 0;
+}
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db)
+{
+       int i;
+
+       if (!phy_db)
+               return;
+
+       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
+       for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+               iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+       for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+               iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+
+       kfree(phy_db);
+}
+EXPORT_SYMBOL(iwl_phy_db_free);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+                          gfp_t alloc_ctx)
+{
+       struct iwl_calib_res_notif_phy_db *phy_db_notif =
+                       (struct iwl_calib_res_notif_phy_db *)pkt->data;
+       enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
+       u16 size  = le16_to_cpu(phy_db_notif->length);
+       struct iwl_phy_db_entry *entry;
+       u16 chg_id = 0;
+
+       if (!phy_db)
+               return -EINVAL;
+
+       if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
+           type == IWL_PHY_DB_CALIB_CHG_TXP)
+               chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+
+       entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+       if (!entry)
+               return -EINVAL;
+
+       kfree(entry->data);
+       entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
+       if (!entry->data) {
+               entry->size = 0;
+               return -ENOMEM;
+       }
+
+       entry->size = size;
+
+       if (type == IWL_PHY_DB_CALIB_CH) {
+               phy_db->channel_num =
+                       le32_to_cpup((__le32 *)phy_db_notif->data);
+               phy_db->channel_size =
+                       (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
+       }
+
+       /* Test PIC */
+       if (type != IWL_PHY_DB_CFG)
+               iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
+                                     (size / sizeof(__le32)) - 1));
+
+       IWL_DEBUG_INFO(phy_db->trans,
+                      "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
+                      __func__, __LINE__, type, size);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_phy_db_set_section);
+
+static int is_valid_channel(u16 ch_id)
+{
+       if (ch_id <= 14 ||
+           (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+           (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+           (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+               return 1;
+       return 0;
+}
+
+static u8 ch_id_to_ch_index(u16 ch_id)
+{
+       if (WARN_ON(!is_valid_channel(ch_id)))
+               return 0xff;
+
+       if (ch_id <= 14)
+               return ch_id - 1;
+       if (ch_id <= 64)
+               return (ch_id + 20) / 4;
+       if (ch_id <= 140)
+               return (ch_id - 12) / 4;
+       return (ch_id - 13) / 4;
+}
+
+
+static u16 channel_id_to_papd(u16 ch_id)
+{
+       if (WARN_ON(!is_valid_channel(ch_id)))
+               return 0xff;
+
+       if (1 <= ch_id && ch_id <= 14)
+               return 0;
+       if (36 <= ch_id && ch_id <= 64)
+               return 1;
+       if (100 <= ch_id && ch_id <= 140)
+               return 2;
+       return 3;
+}
+
+static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
+{
+       struct iwl_phy_db_chg_txp *txp_chg;
+       int i;
+       u8 ch_index = ch_id_to_ch_index(ch_id);
+       if (ch_index == 0xff)
+               return 0xff;
+
+       for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+               txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+               if (!txp_chg)
+                       return 0xff;
+               /*
+                * Look for the first channel group whose max channel is
+                * higher than the wanted channel.
+                */
+               if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
+                       return i;
+       }
+       return 0xff;
+}
+static
+int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+                               u32 type, u8 **data, u16 *size, u16 ch_id)
+{
+       struct iwl_phy_db_entry *entry;
+       u32 channel_num;
+       u32 channel_size;
+       u16 ch_group_id = 0;
+       u16 index;
+
+       if (!phy_db)
+               return -EINVAL;
+
+       /* find wanted channel group */
+       if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
+               ch_group_id = channel_id_to_papd(ch_id);
+       else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
+               ch_group_id = channel_id_to_txp(phy_db, ch_id);
+
+       entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
+       if (!entry)
+               return -EINVAL;
+
+       if (type == IWL_PHY_DB_CALIB_CH) {
+               index = ch_id_to_ch_index(ch_id);
+               channel_num = phy_db->channel_num;
+               channel_size = phy_db->channel_size;
+               if (index >= channel_num) {
+                       IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
+                               ch_id);
+                       return -EINVAL;
+               }
+               *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
+               *size = channel_size;
+       } else {
+               *data = entry->data;
+               *size = entry->size;
+       }
+
+       /* Test PIC */
+       if (type != IWL_PHY_DB_CFG)
+               iwl_phy_db_test_pic(*(((__le32 *)*data) +
+                                     (*size / sizeof(__le32)) - 1));
+
+       IWL_DEBUG_INFO(phy_db->trans,
+                      "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
+                      __func__, __LINE__, type, *size);
+
+       return 0;
+}
+
+static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
+                              u16 length, void *data)
+{
+       struct iwl_phy_db_cmd phy_db_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = PHY_DB_CMD,
+               .flags = CMD_SYNC,
+       };
+
+       IWL_DEBUG_INFO(phy_db->trans,
+                      "Sending PHY-DB hcmd of type %d, of length %d\n",
+                      type, length);
+
+       /* Set phy db cmd variables */
+       phy_db_cmd.type = cpu_to_le16(type);
+       phy_db_cmd.length = cpu_to_le16(length);
+
+       /* Set hcmd variables */
+       cmd.data[0] = &phy_db_cmd;
+       cmd.len[0] = sizeof(struct iwl_phy_db_cmd);
+       cmd.data[1] = data;
+       cmd.len[1] = length;
+       cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+
+       return iwl_trans_send_cmd(phy_db->trans, &cmd);
+}
+
+static int iwl_phy_db_send_all_channel_groups(
+                                       struct iwl_phy_db *phy_db,
+                                       enum iwl_phy_db_section_type type,
+                                       u8 max_ch_groups)
+{
+       u16 i;
+       int err;
+       struct iwl_phy_db_entry *entry;
+
+       /* Send all the channel specific groups to operational fw */
+       for (i = 0; i < max_ch_groups; i++) {
+               entry = iwl_phy_db_get_section(phy_db,
+                                              type,
+                                              i);
+               if (!entry)
+                       return -EINVAL;
+
+               /* Send the requested PHY DB section */
+               err = iwl_send_phy_db_cmd(phy_db,
+                                         type,
+                                         entry->size,
+                                         entry->data);
+               if (err) {
+                       IWL_ERR(phy_db->trans,
+                               "Can't SEND phy_db section %d (%d), err %d",
+                               type, i, err);
+                       return err;
+               }
+
+               IWL_DEBUG_INFO(phy_db->trans,
+                              "Sent PHY_DB HCMD, type = %d num = %d",
+                              type, i);
+       }
+
+       return 0;
+}
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
+{
+       u8 *data = NULL;
+       u16 size = 0;
+       int err;
+
+       IWL_DEBUG_INFO(phy_db->trans,
+                      "Sending phy db data and configuration to runtime image\n");
+
+       /* Send PHY DB CFG section */
+       err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
+                                         &data, &size, 0);
+       if (err) {
+               IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n");
+               return err;
+       }
+
+       err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data);
+       if (err) {
+               IWL_ERR(phy_db->trans,
+                       "Cannot send HCMD of  Phy DB cfg section\n");
+               return err;
+       }
+
+       err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH,
+                                         &data, &size, 0);
+       if (err) {
+               IWL_ERR(phy_db->trans,
+                       "Cannot get Phy DB non specific channel section\n");
+               return err;
+       }
+
+       err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data);
+       if (err) {
+               IWL_ERR(phy_db->trans,
+                       "Cannot send HCMD of Phy DB non specific channel section\n");
+               return err;
+       }
+
+       /* Send all the PAPD channel specific data */
+       err = iwl_phy_db_send_all_channel_groups(phy_db,
+                                                IWL_PHY_DB_CALIB_CHG_PAPD,
+                                                IWL_NUM_PAPD_CH_GROUPS);
+       if (err) {
+               IWL_ERR(phy_db->trans,
+                       "Cannot send channel specific PAPD groups");
+               return err;
+       }
+
+       /* Send all the TXP channel specific data */
+       err = iwl_phy_db_send_all_channel_groups(phy_db,
+                                                IWL_PHY_DB_CALIB_CHG_TXP,
+                                                IWL_NUM_TXP_CH_GROUPS);
+       if (err) {
+               IWL_ERR(phy_db->trans,
+                       "Cannot send channel specific TX power groups");
+               return err;
+       }
+
+       IWL_DEBUG_INFO(phy_db->trans,
+                      "Finished sending phy db non channel data\n");
+       return 0;
+}
+EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644 (file)
index 0000000..d0e43d9
--- /dev/null
@@ -0,0 +1,82 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PHYDB_H__
+#define __IWL_PHYDB_H__
+
+#include <linux/types.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+                          gfp_t alloc_ctx);
+
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
+
+#endif /* __IWL_PHYDB_H__ */
index c3a4bb4..f76e9ca 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -97,6 +97,9 @@
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
 
+/* Device system time */
+#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+
 /**
  * Tx Scheduler
  *
index 1a22611..ce0c67b 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -466,9 +466,7 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
        /* Hard-coded periphery absolute address */
        if (IWL_ABS_PRPH_START <= addr &&
            addr < IWL_ABS_PRPH_START + PRPH_END) {
-                       spin_lock_irqsave(&trans->reg_lock, flags);
-                       if (!iwl_trans_grab_nic_access(trans, false)) {
-                               spin_unlock_irqrestore(&trans->reg_lock, flags);
+                       if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
                                return -EIO;
                        }
                        iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
@@ -476,8 +474,7 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
                        for (i = 0; i < size; i += 4)
                                *(u32 *)(tst->mem.addr + i) =
                                        iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
-                       iwl_trans_release_nic_access(trans);
-                       spin_unlock_irqrestore(&trans->reg_lock, flags);
+                       iwl_trans_release_nic_access(trans, &flags);
        } else { /* target memory (SRAM) */
                iwl_trans_read_mem(trans, addr, tst->mem.addr,
                                   tst->mem.size / 4);
@@ -506,19 +503,13 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
                /* Periphery writes can be 1-3 bytes long, or DWORDs */
                if (size < 4) {
                        memcpy(&val, buf, size);
-                       spin_lock_irqsave(&trans->reg_lock, flags);
-                       if (!iwl_trans_grab_nic_access(trans, false)) {
-                               spin_unlock_irqrestore(&trans->reg_lock, flags);
+                       if (!iwl_trans_grab_nic_access(trans, false, &flags))
                                        return -EIO;
-                       }
                        iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
                                    (addr & 0x0000FFFF) |
                                    ((size - 1) << 24));
                        iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
-                       iwl_trans_release_nic_access(trans);
-                       /* needed after consecutive writes w/o read */
-                       mmiowb();
-                       spin_unlock_irqrestore(&trans->reg_lock, flags);
+                       iwl_trans_release_nic_access(trans, &flags);
                } else {
                        if (size % 4)
                                return -EINVAL;
index e13ffa8..7fbf4d7 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6ba211b..a963f45 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0f85eb3..8c7bec6 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@
 
 #include <linux/ieee80211.h>
 #include <linux/mm.h> /* for page_address */
+#include <linux/lockdep.h>
 
 #include "iwl-debug.h"
 #include "iwl-config.h"
@@ -193,11 +194,11 @@ struct iwl_rx_packet {
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
-       CMD_SYNC = 0,
-       CMD_ASYNC = BIT(0),
-       CMD_WANT_SKB = BIT(1),
-       CMD_WANT_HCMD = BIT(2),
-       CMD_ON_DEMAND = BIT(3),
+       CMD_SYNC                = 0,
+       CMD_ASYNC               = BIT(0),
+       CMD_WANT_SKB            = BIT(1),
+       CMD_WANT_HCMD           = BIT(2),
+       CMD_ON_DEMAND           = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -274,6 +275,7 @@ struct iwl_rx_cmd_buffer {
        struct page *_page;
        int _offset;
        bool _page_stolen;
+       u32 _rx_page_order;
        unsigned int truesize;
 };
 
@@ -294,6 +296,11 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
        return r->_page;
 }
 
+static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
+{
+       __free_pages(r->_page, r->_rx_page_order);
+}
+
 #define MAX_NO_RECLAIM_CMDS    6
 
 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
@@ -410,8 +417,12 @@ struct iwl_trans;
  *     the op_mode. May be called several times before start_fw, can't be
  *     called after that.
  * @set_pmi: set the power pmi state
- * @grab_nic_access: wake the NIC to be able to access non-HBUS regs
- * @release_nic_access: let the NIC go to sleep
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
+ *     Sleeping is not allowed between grab_nic_access and
+ *     release_nic_access.
+ * @release_nic_access: let the NIC go to sleep. The "flags" parameter
+ *     must be the same one that was sent before to the grab_nic_access.
+ * @set_bits_mask - set SRAM register according to value and mask.
  */
 struct iwl_trans_ops {
 
@@ -454,8 +465,12 @@ struct iwl_trans_ops {
        void (*configure)(struct iwl_trans *trans,
                          const struct iwl_trans_config *trans_cfg);
        void (*set_pmi)(struct iwl_trans *trans, bool state);
-       bool (*grab_nic_access)(struct iwl_trans *trans, bool silent);
-       void (*release_nic_access)(struct iwl_trans *trans);
+       bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
+                               unsigned long *flags);
+       void (*release_nic_access)(struct iwl_trans *trans,
+                                  unsigned long *flags);
+       void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
+                             u32 value);
 };
 
 /**
@@ -475,7 +490,6 @@ enum iwl_trans_state {
  * @ops - pointer to iwl_trans_ops
  * @op_mode - pointer to the op_mode
  * @cfg - pointer to the configuration
- * @reg_lock - protect hw register access
  * @dev - pointer to struct device * that represents the device
  * @hw_id: a u32 with the ID of the device / subdevice.
  *     Set during transport allocation.
@@ -496,7 +510,6 @@ struct iwl_trans {
        struct iwl_op_mode *op_mode;
        const struct iwl_cfg *cfg;
        enum iwl_trans_state state;
-       spinlock_t reg_lock;
 
        struct device *dev;
        u32 hw_rev;
@@ -514,6 +527,10 @@ struct iwl_trans {
 
        struct dentry *dbgfs_dir;
 
+#ifdef CONFIG_LOCKDEP
+       struct lockdep_map sync_cmd_lockdep_map;
+#endif
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
@@ -590,12 +607,22 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
-                               struct iwl_host_cmd *cmd)
+                                    struct iwl_host_cmd *cmd)
 {
+       int ret;
+
        WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
                  "%s bad state = %d", __func__, trans->state);
 
-       return trans->ops->send_cmd(trans, cmd);
+       if (!(cmd->flags & CMD_ASYNC))
+               lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+
+       ret = trans->ops->send_cmd(trans, cmd);
+
+       if (!(cmd->flags & CMD_ASYNC))
+               lock_map_release(&trans->sync_cmd_lockdep_map);
+
+       return ret;
 }
 
 static inline struct iwl_device_cmd *
@@ -756,14 +783,20 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
        trans->ops->set_pmi(trans, state);
 }
 
-#define iwl_trans_grab_nic_access(trans, silent)       \
+static inline void
+iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
+{
+       trans->ops->set_bits_mask(trans, reg, mask, value);
+}
+
+#define iwl_trans_grab_nic_access(trans, silent, flags)        \
        __cond_lock(nic_access,                         \
-                   likely((trans)->ops->grab_nic_access(trans, silent)))
+                   likely((trans)->ops->grab_nic_access(trans, silent, flags)))
 
 static inline void __releases(nic_access)
-iwl_trans_release_nic_access(struct iwl_trans *trans)
+iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
 {
-       trans->ops->release_nic_access(trans);
+       trans->ops->release_nic_access(trans, flags);
        __release(nic_access);
 }
 
@@ -773,4 +806,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans)
 int __must_check iwl_pci_register_driver(void);
 void iwl_pci_unregister_driver(void);
 
+static inline void trans_lockdep_init(struct iwl_trans *trans)
+{
+#ifdef CONFIG_LOCKDEP
+       static struct lock_class_key __key;
+
+       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+                        &__key, 0);
+#endif
+}
+
 #endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
new file mode 100644 (file)
index 0000000..807b250
--- /dev/null
@@ -0,0 +1,10 @@
+obj-$(CONFIG_IWLMVM)   += iwlmvm.o
+iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += power.o
+iwlmvm-y += led.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
+
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
new file mode 100644 (file)
index 0000000..73d24aa
--- /dev/null
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_iface_iterator_data {
+       struct ieee80211_vif *ignore_vif;
+       int idx;
+
+       struct iwl_mvm_phy_ctxt *phyctxt;
+
+       u16 ids[MAX_MACS_IN_BINDING];
+       u16 colors[MAX_MACS_IN_BINDING];
+};
+
+static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
+                              struct iwl_mvm_iface_iterator_data *data)
+{
+       struct iwl_binding_cmd cmd;
+       struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
+       int i, ret;
+       u32 status;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+                                                          phyctxt->color));
+       cmd.action = cpu_to_le32(action);
+       cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+                                                 phyctxt->color));
+
+       for (i = 0; i < MAX_MACS_IN_BINDING; i++)
+               cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+       for (i = 0; i < data->idx; i++)
+               cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
+                                                             data->colors[i]));
+
+       status = 0;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+                                         sizeof(cmd), &cmd, &status);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
+                       action, ret);
+               return ret;
+       }
+
+       if (status) {
+               IWL_ERR(mvm, "Binding command failed: %u\n", status);
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
+                                  struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_iface_iterator_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (vif == data->ignore_vif)
+               return;
+
+       if (mvmvif->phy_ctxt != data->phyctxt)
+               return;
+
+       if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
+               return;
+
+       data->ids[data->idx] = mvmvif->id;
+       data->colors[data->idx] = mvmvif->color;
+       data->idx++;
+}
+
+static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif,
+                                 struct iwl_mvm_phy_ctxt *phyctxt,
+                                 bool add)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_iface_iterator_data data = {
+               .ignore_vif = vif,
+               .phyctxt = phyctxt,
+       };
+       u32 action = FW_CTXT_ACTION_MODIFY;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_iface_iterator,
+                                                  &data);
+
+       /*
+        * If there are no other interfaces yet we
+        * need to create a new binding.
+        */
+       if (data.idx == 0) {
+               if (add)
+                       action = FW_CTXT_ACTION_ADD;
+               else
+                       action = FW_CTXT_ACTION_REMOVE;
+       }
+
+       if (add) {
+               if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
+                       return -EINVAL;
+
+               data.ids[data.idx] = mvmvif->id;
+               data.colors[data.idx] = mvmvif->color;
+               data.idx++;
+       }
+
+       return iwl_mvm_binding_cmd(mvm, action, &data);
+}
+
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+               return -EINVAL;
+
+       return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
+}
+
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+               return -EINVAL;
+
+       return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
new file mode 100644 (file)
index 0000000..c64d864
--- /dev/null
@@ -0,0 +1,955 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/cfg80211.h>
+#include <net/ipv6.h>
+#include "iwl-modparams.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct cfg80211_gtk_rekey_data *data)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (iwlwifi_mod_params.sw_crypto)
+               return;
+
+       mutex_lock(&mvm->mutex);
+
+       memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+       memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+       mvmvif->rekey_data.replay_ctr =
+               cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+       mvmvif->rekey_data.valid = true;
+
+       mutex_unlock(&mvm->mutex);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct inet6_dev *idev)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct inet6_ifaddr *ifa;
+       int idx = 0;
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+               mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+               idx++;
+               if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
+                       break;
+       }
+       read_unlock_bh(&idev->lock);
+
+       mvmvif->num_target_ipv6_addrs = idx;
+}
+#endif
+
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif, int idx)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mvmvif->tx_key_idx = idx;
+}
+
+static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
+{
+       int i;
+
+       for (i = 0; i < IWL_P1K_SIZE; i++)
+               out[i] = cpu_to_le16(p1k[i]);
+}
+
+struct wowlan_key_data {
+       struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+       struct iwl_wowlan_tkip_params_cmd *tkip;
+       bool error, use_rsc_tsc, use_tkip;
+       int gtk_key_idx;
+};
+
+static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_sta *sta,
+                                       struct ieee80211_key_conf *key,
+                                       void *_data)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct wowlan_key_data *data = _data;
+       struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+       struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+       struct iwl_p1k_cache *rx_p1ks;
+       u8 *rx_mic_key;
+       struct ieee80211_key_seq seq;
+       u32 cur_rx_iv32 = 0;
+       u16 p1k[IWL_P1K_SIZE];
+       int ret, i;
+
+       mutex_lock(&mvm->mutex);
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
+               struct {
+                       struct iwl_mvm_wep_key_cmd wep_key_cmd;
+                       struct iwl_mvm_wep_key wep_key;
+               } __packed wkc = {
+                       .wep_key_cmd.mac_id_n_color =
+                               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                               mvmvif->color)),
+                       .wep_key_cmd.num_keys = 1,
+                       /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+                       .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
+                       .wep_key.key_index = key->keyidx,
+                       .wep_key.key_size = key->keylen,
+               };
+
+               /*
+                * This will fail -- the key functions don't set support
+                * pairwise WEP keys. However, that's better than silently
+                * failing WoWLAN. Or maybe not?
+                */
+               if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+                       break;
+
+               memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+               if (key->keyidx == mvmvif->tx_key_idx) {
+                       /* TX key must be at offset 0 */
+                       wkc.wep_key.key_offset = 0;
+               } else {
+                       /* others start at 1 */
+                       data->gtk_key_idx++;
+                       wkc.wep_key.key_offset = data->gtk_key_idx;
+               }
+
+               ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
+                                          sizeof(wkc), &wkc);
+               data->error = ret != 0;
+
+               /* don't upload key again */
+               goto out_unlock;
+       }
+       default:
+               data->error = true;
+               goto out_unlock;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               /*
+                * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+                * but we also shouldn't abort suspend due to that. It does have
+                * support for the IGTK key renewal, but doesn't really use the
+                * IGTK for anything. This means we could spuriously wake up or
+                * be deauthenticated, but that was considered acceptable.
+                */
+               goto out_unlock;
+       case WLAN_CIPHER_SUITE_TKIP:
+               if (sta) {
+                       tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+                       tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+                       rx_p1ks = data->tkip->rx_uni;
+
+                       ieee80211_get_key_tx_seq(key, &seq);
+                       tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+                       tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+                       ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+                       iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
+
+                       memcpy(data->tkip->mic_keys.tx,
+                              &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+                              IWL_MIC_KEY_SIZE);
+
+                       rx_mic_key = data->tkip->mic_keys.rx_unicast;
+               } else {
+                       tkip_sc =
+                               data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+                       rx_p1ks = data->tkip->rx_multi;
+                       rx_mic_key = data->tkip->mic_keys.rx_mcast;
+               }
+
+               /*
+                * For non-QoS this relies on the fact that both the uCode and
+                * mac80211 use TID 0 (as they need to to avoid replay attacks)
+                * for checking the IV in the frames.
+                */
+               for (i = 0; i < IWL_NUM_RSC; i++) {
+                       ieee80211_get_key_rx_seq(key, i, &seq);
+                       tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+                       tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+                       /* wrapping isn't allowed, AP must rekey */
+                       if (seq.tkip.iv32 > cur_rx_iv32)
+                               cur_rx_iv32 = seq.tkip.iv32;
+               }
+
+               ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+                                         cur_rx_iv32, p1k);
+               iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+               ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+                                         cur_rx_iv32 + 1, p1k);
+               iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+               memcpy(rx_mic_key,
+                      &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+                      IWL_MIC_KEY_SIZE);
+
+               data->use_tkip = true;
+               data->use_rsc_tsc = true;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               if (sta) {
+                       u8 *pn = seq.ccmp.pn;
+
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+                       aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+                       ieee80211_get_key_tx_seq(key, &seq);
+                       aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
+                                                   ((u64)pn[4] << 8) |
+                                                   ((u64)pn[3] << 16) |
+                                                   ((u64)pn[2] << 24) |
+                                                   ((u64)pn[1] << 32) |
+                                                   ((u64)pn[0] << 40));
+               } else {
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+               }
+
+               /*
+                * For non-QoS this relies on the fact that both the uCode and
+                * mac80211 use TID 0 for checking the IV in the frames.
+                */
+               for (i = 0; i < IWL_NUM_RSC; i++) {
+                       u8 *pn = seq.ccmp.pn;
+
+                       ieee80211_get_key_rx_seq(key, i, &seq);
+                       aes_sc->pn = cpu_to_le64((u64)pn[5] |
+                                                ((u64)pn[4] << 8) |
+                                                ((u64)pn[3] << 16) |
+                                                ((u64)pn[2] << 24) |
+                                                ((u64)pn[1] << 32) |
+                                                ((u64)pn[0] << 40));
+               }
+               data->use_rsc_tsc = true;
+               break;
+       }
+
+       /*
+        * The D3 firmware hardcodes the key offset 0 as the key it uses
+        * to transmit packets to the AP, i.e. the PTK.
+        */
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+               key->hw_key_idx = 0;
+       } else {
+               data->gtk_key_idx++;
+               key->hw_key_idx = data->gtk_key_idx;
+       }
+
+       ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
+       data->error = ret != 0;
+out_unlock:
+       mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+                                struct cfg80211_wowlan *wowlan)
+{
+       struct iwl_wowlan_patterns_cmd *pattern_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = WOWLAN_PATTERNS,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+               .flags = CMD_SYNC,
+       };
+       int i, err;
+
+       if (!wowlan->n_patterns)
+               return 0;
+
+       cmd.len[0] = sizeof(*pattern_cmd) +
+               wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+
+       pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+       if (!pattern_cmd)
+               return -ENOMEM;
+
+       pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+       for (i = 0; i < wowlan->n_patterns; i++) {
+               int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+               memcpy(&pattern_cmd->patterns[i].mask,
+                      wowlan->patterns[i].mask, mask_len);
+               memcpy(&pattern_cmd->patterns[i].pattern,
+                      wowlan->patterns[i].pattern,
+                      wowlan->patterns[i].pattern_len);
+               pattern_cmd->patterns[i].mask_size = mask_len;
+               pattern_cmd->patterns[i].pattern_size =
+                       wowlan->patterns[i].pattern_len;
+       }
+
+       cmd.data[0] = pattern_cmd;
+       err = iwl_mvm_send_cmd(mvm, &cmd);
+       kfree(pattern_cmd);
+       return err;
+}
+
+static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif)
+{
+       struct iwl_proto_offload_cmd cmd = {};
+#if IS_ENABLED(CONFIG_IPV6)
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int i;
+
+       if (mvmvif->num_target_ipv6_addrs) {
+               cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
+               memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
+       }
+
+       BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
+                    sizeof(mvmvif->target_ipv6_addrs[i]));
+
+       for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
+               memcpy(cmd.target_ipv6_addr[i],
+                      &mvmvif->target_ipv6_addrs[i],
+                      sizeof(cmd.target_ipv6_addr[i]));
+#endif
+
+       if (vif->bss_conf.arp_addr_cnt) {
+               cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
+               cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+               memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
+       }
+
+       if (!cmd.enabled)
+               return 0;
+
+       return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
+                                   sizeof(cmd), &cmd);
+}
+
+struct iwl_d3_iter_data {
+       struct iwl_mvm *mvm;
+       struct ieee80211_vif *vif;
+       bool error;
+};
+
+static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac,
+                                     struct ieee80211_vif *vif)
+{
+       struct iwl_d3_iter_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return;
+
+       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+               return;
+
+       if (data->vif) {
+               IWL_ERR(data->mvm, "More than one managed interface active!\n");
+               data->error = true;
+               return;
+       }
+
+       data->vif = vif;
+}
+
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               struct ieee80211_sta *ap_sta)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_chanctx_conf *ctx;
+       u8 chains_static, chains_dynamic;
+       struct cfg80211_chan_def chandef;
+       int ret, i;
+       struct iwl_binding_cmd binding_cmd = {};
+       struct iwl_time_quota_cmd quota_cmd = {};
+       u32 status;
+
+       /* add back the PHY */
+       if (WARN_ON(!mvmvif->phy_ctxt))
+               return -EINVAL;
+
+       rcu_read_lock();
+       ctx = rcu_dereference(vif->chanctx_conf);
+       if (WARN_ON(!ctx)) {
+               rcu_read_unlock();
+               return -EINVAL;
+       }
+       chandef = ctx->def;
+       chains_static = ctx->rx_chains_static;
+       chains_dynamic = ctx->rx_chains_dynamic;
+       rcu_read_unlock();
+
+       ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
+                                  chains_static, chains_dynamic);
+       if (ret)
+               return ret;
+
+       /* add back the MAC */
+       mvmvif->uploaded = false;
+
+       if (WARN_ON(!vif->bss_conf.assoc))
+               return -EINVAL;
+       /* hack */
+       vif->bss_conf.assoc = false;
+       ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+       vif->bss_conf.assoc = true;
+       if (ret)
+               return ret;
+
+       /* add back binding - XXX refactor? */
+       binding_cmd.id_and_color =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+                                               mvmvif->phy_ctxt->color));
+       binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+       binding_cmd.phy =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+                                               mvmvif->phy_ctxt->color));
+       binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                             mvmvif->color));
+       for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+               binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+       status = 0;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+                                         sizeof(binding_cmd), &binding_cmd,
+                                         &status);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+               return ret;
+       }
+
+       if (status) {
+               IWL_ERR(mvm, "Binding command failed: %u\n", status);
+               return -EIO;
+       }
+
+       ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+       if (ret)
+               return ret;
+       rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+
+       ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+       if (ret)
+               return ret;
+
+       /* and some quota */
+       quota_cmd.quotas[0].id_and_color =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+                                               mvmvif->phy_ctxt->color));
+       quota_cmd.quotas[0].quota = cpu_to_le32(100);
+       quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
+
+       for (i = 1; i < MAX_BINDINGS; i++)
+               quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+                                  sizeof(quota_cmd), &quota_cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+       return 0;
+}
+
+/*
+ * iwl_mvm_suspend() - enter WoWLAN (D3) operation when the host suspends.
+ *
+ * Finds the single associated BSS interface, stops the runtime firmware,
+ * loads the D3 firmware image and manually reprograms it with the MAC,
+ * binding, station and quota state, then sends the crypto material,
+ * wakeup-filter configuration, patterns and protocol offloads, and
+ * finally the D3 config command that switches the firmware into
+ * WoWLAN operation.
+ *
+ * Returns 0 on success, 1 to tell mac80211 to fall back to a normal
+ * (disconnecting) suspend, or a negative error code on failure.
+ */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_d3_iter_data suspend_iter_data = {
+               .mvm = mvm,
+       };
+       struct ieee80211_vif *vif;
+       struct iwl_mvm_vif *mvmvif;
+       struct ieee80211_sta *ap_sta;
+       struct iwl_mvm_sta *mvm_ap_sta;
+       struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+       struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+       struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+       struct iwl_d3_manager_config d3_cfg_cmd = {};
+       struct wowlan_key_data key_data = {
+               .use_rsc_tsc = false,
+               .tkip = &tkip_cmd,
+               .use_tkip = false,
+       };
+       int ret, i;
+       u16 seq;
+       u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
+
+       if (WARN_ON(!wowlan))
+               return -EINVAL;
+
+       /* too large for the stack; freed at out_noreset below */
+       key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+       if (!key_data.rsc_tsc)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+
+       /* saved here and restored at "out:" after the D3 reprogramming */
+       old_aux_sta_id = mvm->aux_sta.sta_id;
+
+       /* see if there's only a single BSS vif and it's associated */
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_d3_iface_iterator, &suspend_iter_data);
+
+       if (suspend_iter_data.error || !suspend_iter_data.vif) {
+               /* returning 1 makes mac80211 do a normal suspend instead */
+               ret = 1;
+               goto out_noreset;
+       }
+
+       vif = suspend_iter_data.vif;
+       mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       ap_sta = rcu_dereference_protected(
+                       mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+                       lockdep_is_held(&mvm->mutex));
+       if (IS_ERR_OR_NULL(ap_sta)) {
+               ret = -EINVAL;
+               goto out_noreset;
+       }
+
+       mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+
+       /*
+        * The D3 firmware still hardcodes the AP station ID for the
+        * BSS we're associated with as 0. Store the real STA ID here
+        * and assign 0. When we leave this function, we'll restore
+        * the original value for the resume code.
+        */
+       old_ap_sta_id = mvm_ap_sta->sta_id;
+       mvm_ap_sta->sta_id = 0;
+       mvmvif->ap_sta_id = 0;
+
+       /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
+
+       wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
+
+       /*
+        * We know the last used seqno, and the uCode expects to know that
+        * one, it will increment before TX.
+        */
+       seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
+       wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+
+       /*
+        * For QoS counters, we store the one to use next, so subtract 0x10
+        * since the uCode will add 0x10 *before* using the value while we
+        * increment after using the value (i.e. store the next value to use).
+        */
+       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               seq = mvm_ap_sta->tid_data[i].seq_number;
+               seq -= 0x10;
+               wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
+       }
+
+       /* translate the cfg80211 wakeup triggers into firmware filter bits */
+       if (wowlan->disconnect)
+               wowlan_config_cmd.wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+                                   IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+       if (wowlan->magic_pkt)
+               wowlan_config_cmd.wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+       if (wowlan->gtk_rekey_failure)
+               wowlan_config_cmd.wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+       if (wowlan->eap_identity_req)
+               wowlan_config_cmd.wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+       if (wowlan->four_way_handshake)
+               wowlan_config_cmd.wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+       if (wowlan->n_patterns)
+               wowlan_config_cmd.wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+       if (wowlan->rfkill_release)
+               d3_cfg_cmd.wakeup_flags |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+       iwl_mvm_cancel_scan(mvm);
+
+       iwl_trans_stop_device(mvm->trans);
+
+       /*
+        * Set the HW restart bit -- this is mostly true as we're
+        * going to load new firmware and reprogram that, though
+        * the reprogramming is going to be manual to avoid adding
+        * all the MACs that aren't support.
+        * We don't have to clear up everything though because the
+        * reprogramming is manual. When we resume, we'll actually
+        * go through a proper restart sequence again to switch
+        * back to the runtime firmware image.
+        */
+       set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+       /* We reprogram keys and shouldn't allocate new key indices */
+       memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+       /*
+        * The D3 firmware still hardcodes the AP station ID for the
+        * BSS we're associated with as 0. As a result, we have to move
+        * the auxiliary station to ID 1 so the ID 0 remains free for
+        * the AP station for later.
+        * We set the sta_id to 1 here, and reset it to its previous
+        * value (that we stored above) later.
+        */
+       mvm->aux_sta.sta_id = 1;
+
+       ret = iwl_mvm_load_d3_fw(mvm);
+       if (ret)
+               goto out;
+
+       ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+       if (ret)
+               goto out;
+
+       if (!iwlwifi_mod_params.sw_crypto) {
+               /*
+                * This needs to be unlocked due to lock ordering
+                * constraints. Since we're in the suspend path
+                * that isn't really a problem though.
+                */
+               mutex_unlock(&mvm->mutex);
+               ieee80211_iter_keys(mvm->hw, vif,
+                                   iwl_mvm_wowlan_program_keys,
+                                   &key_data);
+               mutex_lock(&mvm->mutex);
+               if (key_data.error) {
+                       ret = -EIO;
+                       goto out;
+               }
+
+               if (key_data.use_rsc_tsc) {
+                       struct iwl_host_cmd rsc_tsc_cmd = {
+                               .id = WOWLAN_TSC_RSC_PARAM,
+                               .flags = CMD_SYNC,
+                               .data[0] = key_data.rsc_tsc,
+                               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+                               .len[0] = sizeof(*key_data.rsc_tsc),
+                       };
+
+                       ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd);
+                       if (ret)
+                               goto out;
+               }
+
+               if (key_data.use_tkip) {
+                       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                                  WOWLAN_TKIP_PARAM,
+                                                  CMD_SYNC, sizeof(tkip_cmd),
+                                                  &tkip_cmd);
+                       if (ret)
+                               goto out;
+               }
+
+               /* GTK rekeying material (KEK/KCK + replay counter), if set */
+               if (mvmvif->rekey_data.valid) {
+                       memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+                       memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+                              NL80211_KCK_LEN);
+                       kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+                       memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+                              NL80211_KEK_LEN);
+                       kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+                       kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+
+                       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                                  WOWLAN_KEK_KCK_MATERIAL,
+                                                  CMD_SYNC,
+                                                  sizeof(kek_kck_cmd),
+                                                  &kek_kck_cmd);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
+                                  CMD_SYNC, sizeof(wowlan_config_cmd),
+                                  &wowlan_config_cmd);
+       if (ret)
+               goto out;
+
+       ret = iwl_mvm_send_patterns(mvm, wowlan);
+       if (ret)
+               goto out;
+
+       ret = iwl_mvm_send_proto_offload(mvm, vif);
+       if (ret)
+               goto out;
+
+       /* must be last -- this switches firmware state */
+       ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
+                                  sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+       if (ret)
+               goto out;
+
+       clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+       iwl_trans_d3_suspend(mvm->trans);
+ out:
+       /* restore the station IDs saved/overridden above for resume */
+       mvm->aux_sta.sta_id = old_aux_sta_id;
+       mvm_ap_sta->sta_id = old_ap_sta_id;
+       mvmvif->ap_sta_id = old_ap_sta_id;
+ out_noreset:
+       kfree(key_data.rsc_tsc);
+       if (ret < 0)
+               ieee80211_restart_hw(mvm->hw);
+
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+/*
+ * iwl_mvm_query_wakeup_reasons() - ask the D3 firmware why it woke up
+ * and report the result to cfg80211/userspace.
+ *
+ * First checks the firmware error table; a valid entry with the
+ * RF-kill indicator is reported as an rfkill_release wakeup. Otherwise
+ * the WoWLAN status is fetched from the firmware, the wakeup-reason
+ * bits are translated into a struct cfg80211_wowlan_wakeup, and any
+ * wake packet is attached (converted from 802.11 to 802.3 framing when
+ * the reason indicates a data frame).
+ *
+ * Must be called with mvm->mutex held (uses CMD_SYNC commands).
+ */
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif)
+{
+       u32 base = mvm->error_event_table;
+       struct error_table_start {
+               /* cf. struct iwl_error_event_table */
+               u32 valid;
+               u32 error_id;
+       } err_info;
+       struct cfg80211_wowlan_wakeup wakeup = {
+               .pattern_idx = -1,
+       };
+       struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+       struct iwl_host_cmd cmd = {
+               .id = WOWLAN_GET_STATUSES,
+               .flags = CMD_SYNC | CMD_WANT_SKB,
+       };
+       struct iwl_wowlan_status *status;
+       u32 reasons;
+       int ret, len;
+       /* true when the wake packet should be converted to 802.3 framing */
+       bool pkt8023 = false;
+       struct sk_buff *pkt = NULL;
+
+       iwl_trans_read_mem_bytes(mvm->trans, base,
+                                &err_info, sizeof(err_info));
+
+       /* a valid error table entry means the firmware crashed/asserted */
+       if (err_info.valid) {
+               IWL_INFO(mvm, "error table is valid (%d)\n",
+                        err_info.valid);
+               if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+                       wakeup.rfkill_release = true;
+                       ieee80211_report_wowlan_wakeup(vif, &wakeup,
+                                                      GFP_KERNEL);
+               }
+               return;
+       }
+
+       /* only for tracing for now */
+       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+       if (ret)
+               IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+               return;
+       }
+
+       /* RF-kill already asserted again... */
+       if (!cmd.resp_pkt)
+               return;
+
+       /* validate the response length before touching the payload */
+       len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+               goto out;
+       }
+
+       status = (void *)cmd.resp_pkt->data;
+
+       if (len - sizeof(struct iwl_cmd_header) !=
+           sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+               goto out;
+       }
+
+       reasons = le32_to_cpu(status->wakeup_reasons);
+
+       /* non-wireless wakeup: report with a NULL wakeup struct */
+       if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+               wakeup_report = NULL;
+               goto report;
+       }
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+               wakeup.magic_pkt = true;
+               pkt8023 = true;
+       }
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+               wakeup.pattern_idx =
+                       le16_to_cpu(status->pattern_number);
+               pkt8023 = true;
+       }
+
+       if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+                      IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+               wakeup.disconnect = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+               wakeup.gtk_rekey_failure = true;
+               pkt8023 = true;
+       }
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+               wakeup.rfkill_release = true;
+               pkt8023 = true;
+       }
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+               wakeup.eap_identity_req = true;
+               pkt8023 = true;
+       }
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+               wakeup.four_way_handshake = true;
+               pkt8023 = true;
+       }
+
+       if (status->wake_packet_bufsize) {
+               /* bufsize = bytes the FW captured; length = full frame len */
+               u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
+               u32 pktlen = le32_to_cpu(status->wake_packet_length);
+
+               if (pkt8023) {
+                       pkt = alloc_skb(pktsize, GFP_KERNEL);
+                       if (!pkt)
+                               goto report;
+                       memcpy(skb_put(pkt, pktsize), status->wake_packet,
+                              pktsize);
+                       if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+                               goto report;
+                       wakeup.packet = pkt->data;
+                       wakeup.packet_present_len = pkt->len;
+                       /* packet_len: full length minus the captured excess */
+                       wakeup.packet_len = pkt->len - (pktlen - pktsize);
+                       wakeup.packet_80211 = false;
+               } else {
+                       wakeup.packet = status->wake_packet;
+                       wakeup.packet_present_len = pktsize;
+                       wakeup.packet_len = pktlen;
+                       wakeup.packet_80211 = true;
+               }
+       }
+
+ report:
+       ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+       kfree_skb(pkt);
+
+ out:
+       iwl_free_resp(&cmd);
+}
+
+/*
+ * iwl_mvm_resume() - mac80211 resume handler for WoWLAN (D3) exit.
+ *
+ * Re-finds the BSS vif, brings the transport out of D3, queries the
+ * firmware wakeup reasons (if the device survived suspend) and tells
+ * mac80211 to disconnect on the resumed vif.
+ *
+ * Always returns 1: together with IWL_MVM_STATUS_IN_HW_RESTART this
+ * asks mac80211 to fully reconfigure the device, switching back to
+ * the runtime firmware image.
+ */
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_d3_iter_data resume_iter_data = {
+               .mvm = mvm,
+       };
+       struct ieee80211_vif *vif = NULL;
+       int ret;
+       enum iwl_d3_status d3_status;
+
+       mutex_lock(&mvm->mutex);
+
+       /* get the BSS vif pointer again */
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_d3_iface_iterator, &resume_iter_data);
+
+       if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif))
+               goto out_unlock;
+
+       vif = resume_iter_data.vif;
+
+       ret = iwl_trans_d3_resume(mvm->trans, &d3_status);
+       if (ret)
+               goto out_unlock;
+
+       if (d3_status != IWL_D3_STATUS_ALIVE) {
+               IWL_INFO(mvm, "Device was reset during suspend\n");
+               goto out_unlock;
+       }
+
+       iwl_mvm_query_wakeup_reasons(mvm, vif);
+
+ out_unlock:
+       mutex_unlock(&mvm->mutex);
+
+       /* must be called without holding mvm->mutex */
+       if (vif)
+               ieee80211_resume_disconnect(vif);
+
+       /* return 1 to reconfigure the device */
+       set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+       return 1;
+}
+
+/*
+ * iwl_mvm_set_wakeup() - mac80211 set_wakeup handler; forwards the
+ * WoWLAN enable/disable state to the device's wakeup capability.
+ */
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       device_set_wakeup_enable(mvm->trans->dev, enabled);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
new file mode 100644 (file)
index 0000000..c1bdb55
--- /dev/null
@@ -0,0 +1,378 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "sta.h"
+#include "iwl-io.h"
+
+/*
+ * Pairs a vif with its mvm for per-vif debugfs entries.
+ * NOTE(review): not referenced by any code visible in this file yet --
+ * presumably intended for upcoming per-vif debugfs files; confirm.
+ */
+struct iwl_dbgfs_mvm_ctx {
+       struct iwl_mvm *mvm;
+       struct ieee80211_vif *vif;
+};
+
+/* generic open: expose the inode's private data (the mvm) to read/write */
+static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+/*
+ * debugfs "tx_flush" write handler: parses a hex scheduler-queue mask
+ * from userspace and flushes the corresponding TX queues.
+ * Returns the byte count on success or a negative error code.
+ */
+static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+
+       char buf[16];
+       int buf_size, ret;
+       u32 scd_q_msk;
+
+       /* only meaningful while the runtime (non-D3) firmware is running */
+       if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+               return -EIO;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x", &scd_q_msk) != 1)
+               return -EINVAL;
+
+       IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
+
+       mutex_lock(&mvm->mutex);
+       /* GNU "?:" extension: return the error, or count on success (0) */
+       ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+/*
+ * debugfs "sta_drain" write handler: parses "<sta_id> <drain>" and
+ * toggles draining of the given firmware station's TX queues.
+ * Returns the byte count on success or a negative error code.
+ * NOTE(review): sta_id comes straight from userspace and indexes
+ * fw_id_to_mac_id[] without a visible range check here -- presumably
+ * bounded elsewhere; confirm against the array size.
+ */
+static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       struct ieee80211_sta *sta;
+
+       char buf[8];
+       int buf_size, sta_id, drain, ret;
+
+       if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+               return -EIO;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+
+       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+       if (IS_ERR_OR_NULL(sta))
+               ret = -ENOENT;
+       else
+               ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
+                       count;
+
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+/*
+ * debugfs "sram" read handler: dumps device SRAM as a hex dump.
+ * The window (offset/len) is set via the matching write handler;
+ * if unset, defaults to the whole firmware data segment at 0x800000.
+ * NOTE(review): ret is declared size_t but holds the ssize_t result
+ * of simple_read_from_buffer(); a negative error would round-trip
+ * through an unsigned type -- consider declaring ret as ssize_t.
+ */
+static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       const struct fw_img *img;
+       int ofs, len, pos = 0;
+       size_t bufsz, ret;
+       char *buf;
+       u8 *ptr;
+
+       /* default is to dump the entire data segment */
+       if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
+               mvm->dbgfs_sram_offset = 0x800000;
+               if (!mvm->ucode_loaded)
+                       return -EINVAL;
+               img = &mvm->fw->img[mvm->cur_ucode];
+               mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+       }
+       len = mvm->dbgfs_sram_len;
+
+       /* ~4 output chars per byte of SRAM, plus header slack */
+       bufsz = len * 4 + 256;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ptr = kzalloc(len, GFP_KERNEL);
+       if (!ptr) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+                        mvm->dbgfs_sram_offset);
+
+       iwl_trans_read_mem_bytes(mvm->trans,
+                                mvm->dbgfs_sram_offset,
+                                ptr, len);
+       /* format 16 bytes per line: "0xOFFS " + hex dump + newline */
+       for (ofs = 0; ofs < len; ofs += 16) {
+               pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
+               hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
+                                  bufsz - pos, false);
+               pos += strlen(buf + pos);
+               if (bufsz - pos > 0)
+                       buf[pos++] = '\n';
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+       kfree(buf);
+       kfree(ptr);
+
+       return ret;
+}
+
+/*
+ * debugfs "sram" write handler: sets the SRAM dump window as
+ * "<offset>,<len>" in hex (both must be 4-byte aligned). Any other
+ * input resets the window so the next read dumps the default segment.
+ */
+static ssize_t iwl_dbgfs_sram_write(struct file *file,
+                                   const char __user *user_buf, size_t count,
+                                   loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char buf[64];
+       int buf_size;
+       u32 offset, len;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+               if ((offset & 0x3) || (len & 0x3))
+                       return -EINVAL;
+               mvm->dbgfs_sram_offset = offset;
+               mvm->dbgfs_sram_len = len;
+       } else {
+               /* reset: next read falls back to the full data segment */
+               mvm->dbgfs_sram_offset = 0;
+               mvm->dbgfs_sram_len = 0;
+       }
+
+       return count;
+}
+
+/*
+ * debugfs "stations" read handler: lists every firmware station slot
+ * as "<id>: <MAC|N/A|errno>", one per line.
+ * buf[400] leaves room for IWL_MVM_STATION_COUNT lines of up to
+ * ~22 chars each -- assumes the count stays small; TODO confirm.
+ */
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       struct ieee80211_sta *sta;
+       char buf[400];
+       int i, pos = 0, bufsz = sizeof(buf);
+
+       mutex_lock(&mvm->mutex);
+
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+               pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+                                               lockdep_is_held(&mvm->mutex));
+               if (!sta)
+                       pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+               else if (IS_ERR(sta))
+                       /* slot holds an ERR_PTR, e.g. during removal */
+                       pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
+                                        PTR_ERR(sta));
+               else
+                       pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+                                        sta->addr);
+       }
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+/*
+ * debugfs "power_down_allow" write handler: parses a 0/1 flag; currently
+ * only logs the request (sending REPLY_DEBUG_CMD is a TODO below).
+ * NOTE(review): unlike the other write handlers here, this copies a
+ * fixed sizeof(buf) bytes regardless of count -- a write shorter than
+ * 8 bytes may fault (-EFAULT), and an 8-byte write fills buf without a
+ * NUL terminator before sscanf; consider min(count, sizeof(buf) - 1).
+ */
+static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
+                                               const char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char buf[8] = {};
+       int allow;
+
+       if (!mvm->ucode_loaded)
+               return -EIO;
+
+       if (copy_from_user(buf, user_buf, sizeof(buf)))
+               return -EFAULT;
+
+       if (sscanf(buf, "%d", &allow) != 1)
+               return -EINVAL;
+
+       IWL_DEBUG_POWER(mvm, "%s device power down\n",
+                       allow ? "allow" : "prevent");
+
+       /*
+        * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it
+        */
+
+       return count;
+}
+
+/*
+ * debugfs "power_down_d3_allow" write handler: parses a 0/1 flag and
+ * records it in mvm->prevent_power_down_d3 for use when the WoWLAN
+ * firmware comes alive (see TODO below).
+ * NOTE(review): same fixed-size copy_from_user caveat as
+ * iwl_dbgfs_power_down_allow_write -- short writes may fault and an
+ * exactly-8-byte write leaves buf unterminated before sscanf.
+ */
+static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
+                                                  const char __user *user_buf,
+                                                  size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char buf[8] = {};
+       int allow;
+
+       if (copy_from_user(buf, user_buf, sizeof(buf)))
+               return -EFAULT;
+
+       if (sscanf(buf, "%d", &allow) != 1)
+               return -EINVAL;
+
+       IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
+                       allow ? "allow" : "prevent");
+
+       /*
+        * TODO: When WoWLAN FW alive notification happens, driver will send
+        * REPLY_DEBUG_CMD setting power_down_allow flag according to
+        * mvm->prevent_power_down_d3
+        */
+       mvm->prevent_power_down_d3 = !allow;
+
+       return count;
+}
+
+/*
+ * Helper macros that stamp out a file_operations struct and register a
+ * debugfs file for a given name, jumping to "err" on failure.
+ * NOTE(review): MVM_DEBUGFS_READ_FILE_OPS omits the trailing semicolon
+ * that the other two *_FILE_OPS macros include, so use sites supply it;
+ * the semicolon in the WRITE/READ_WRITE variants leaves a stray ';' at
+ * file scope when used with one -- consider making all three consistent.
+ */
+#define MVM_DEBUGFS_READ_FILE_OPS(name)                                        \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+       .read = iwl_dbgfs_##name##_read,                                \
+       .open = iwl_dbgfs_open_file_generic,                            \
+       .llseek = generic_file_llseek,                                  \
+}
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name)                          \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+       .write = iwl_dbgfs_##name##_write,                              \
+       .read = iwl_dbgfs_##name##_read,                                \
+       .open = iwl_dbgfs_open_file_generic,                            \
+       .llseek = generic_file_llseek,                                  \
+};
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name)                               \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+       .write = iwl_dbgfs_##name##_write,                              \
+       .open = iwl_dbgfs_open_file_generic,                            \
+       .llseek = generic_file_llseek,                                  \
+};
+
+/* registers #name in "parent" with the mvm as private data */
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do {                  \
+               if (!debugfs_create_file(#name, mode, parent, mvm,      \
+                                        &iwl_dbgfs_##name##_ops))      \
+                       goto err;                                       \
+       } while (0)
+
+/* per-vif variant: private data is the vif instead of the mvm */
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do {              \
+               if (!debugfs_create_file(#name, mode, parent, vif,      \
+                                        &iwl_dbgfs_##name##_ops))      \
+                       goto err;                                       \
+       } while (0)
+
+/* Device wide debugfs entries */
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
+MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+
+/*
+ * iwl_mvm_dbgfs_register() - create the mvm debugfs files and the
+ * "iwlwifi" symlink inside the wiphy's debugfs directory.
+ * Returns 0 on success, -ENOMEM if any entry could not be created.
+ * NOTE(review): the "stations" entry passes dbgfs_dir directly while
+ * the others use mvm->debugfs_dir -- same pointer at this point, but
+ * inconsistent style.
+ */
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+{
+       char buf[100];
+
+       mvm->debugfs_dir = dbgfs_dir;
+
+       MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+
+       /*
+        * Create a symlink with mac80211. It will be removed when mac80211
+        * exists (before the opmode exists which removes the target.)
+        */
+       snprintf(buf, 100, "../../%s/%s",
+                dbgfs_dir->d_parent->d_parent->d_name.name,
+                dbgfs_dir->d_parent->d_name.name);
+       if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
+               goto err;
+
+       return 0;
+err:
+       IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
+       return -ENOMEM;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
new file mode 100644 (file)
index 0000000..cf6f9a0
--- /dev/null
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_d3_h__
+#define __fw_api_d3_h__
+
+/**
+ * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
+ * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
+ */
+enum iwl_d3_wakeup_flags {
+       IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
+
+/**
+ * struct iwl_d3_manager_config - D3 manager configuration command
+ * @min_sleep_time: minimum sleep time (in usec)
+ * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ *
+ * The structure is used for the D3_CONFIG_CMD command.
+ */
+struct iwl_d3_manager_config {
+       __le32 min_sleep_time;
+       __le32 wakeup_flags;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
+
+
+/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
+
+/**
+ * enum iwl_proto_offloads - enabled protocol offloads
+ * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
+ * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
+ */
+enum iwl_proto_offloads {
+       IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
+       IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
+};
+
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS       2
+
+/**
+ * struct iwl_proto_offload_cmd - ARP/NS offload configuration
+ * @enabled: enable flags
+ * @remote_ipv4_addr: remote address to answer to (or zero if all)
+ * @host_ipv4_addr: our IPv4 address to respond to queries for
+ * @arp_mac_addr: our MAC address for ARP responses
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ *     for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd {
+       __le32 enabled;
+       __be32 remote_ipv4_addr;
+       __be32 host_ipv4_addr;
+       u8 arp_mac_addr[ETH_ALEN];
+       __le16 reserved1;
+
+       u8 remote_ipv6_addr[16];
+       u8 solicited_node_ipv6_addr[16];
+       u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
+       u8 ndp_mac_addr[ETH_ALEN];
+       __le16 reserved2;
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+
+
+/*
+ * WOWLAN_PATTERNS
+ */
+#define IWL_WOWLAN_MIN_PATTERN_LEN     16
+#define IWL_WOWLAN_MAX_PATTERN_LEN     128
+
+struct iwl_wowlan_pattern {
+       u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+       u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
+       u8 mask_size;
+       u8 pattern_size;
+       __le16 reserved;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
+
+#define IWL_WOWLAN_MAX_PATTERNS        20
+
+struct iwl_wowlan_patterns_cmd {
+       __le32 n_patterns;
+       struct iwl_wowlan_pattern patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
+
+enum iwl_wowlan_wakeup_filters {
+       IWL_WOWLAN_WAKEUP_MAGIC_PACKET                  = BIT(0),
+       IWL_WOWLAN_WAKEUP_PATTERN_MATCH                 = BIT(1),
+       IWL_WOWLAN_WAKEUP_BEACON_MISS                   = BIT(2),
+       IWL_WOWLAN_WAKEUP_LINK_CHANGE                   = BIT(3),
+       IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL                = BIT(4),
+       IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ                 = BIT(5),
+       IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE                = BIT(6),
+       IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT             = BIT(7),
+       IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT              = BIT(8),
+       IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS              = BIT(9),
+       IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE        = BIT(10),
+       /* BIT(11) reserved */
+       IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET          = BIT(12),
+}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
+
+struct iwl_wowlan_config_cmd {
+       __le32 wakeup_filter;
+       __le16 non_qos_seq;
+       __le16 qos_seq[8];
+       u8 wowlan_ba_teardown_tids;
+       u8 is_11n_connection;
+} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
+
+/*
+ * WOWLAN_TSC_RSC_PARAMS
+ */
+#define IWL_NUM_RSC    16
+
+struct tkip_sc {
+       __le16 iv16;
+       __le16 pad;
+       __le32 iv32;
+} __packed; /* TKIP_SC_API_U_VER_1 */
+
+struct iwl_tkip_rsc_tsc {
+       struct tkip_sc unicast_rsc[IWL_NUM_RSC];
+       struct tkip_sc multicast_rsc[IWL_NUM_RSC];
+       struct tkip_sc tsc;
+} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
+
+struct aes_sc {
+       __le64 pn;
+} __packed; /* TKIP_AES_SC_API_U_VER_1 */
+
+struct iwl_aes_rsc_tsc {
+       struct aes_sc unicast_rsc[IWL_NUM_RSC];
+       struct aes_sc multicast_rsc[IWL_NUM_RSC];
+       struct aes_sc tsc;
+} __packed; /* AES_TSC_RSC_API_S_VER_1 */
+
+union iwl_all_tsc_rsc {
+       struct iwl_tkip_rsc_tsc tkip;
+       struct iwl_aes_rsc_tsc aes;
+}; /* ALL_TSC_RSC_API_S_VER_2 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd {
+       union iwl_all_tsc_rsc all_tsc_rsc;
+} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+
+#define IWL_MIC_KEY_SIZE       8
+struct iwl_mic_keys {
+       u8 tx[IWL_MIC_KEY_SIZE];
+       u8 rx_unicast[IWL_MIC_KEY_SIZE];
+       u8 rx_mcast[IWL_MIC_KEY_SIZE];
+} __packed; /* MIC_KEYS_API_S_VER_1 */
+
+#define IWL_P1K_SIZE           5
+struct iwl_p1k_cache {
+       __le16 p1k[IWL_P1K_SIZE];
+} __packed;
+
+#define IWL_NUM_RX_P1K_CACHE   2
+
+struct iwl_wowlan_tkip_params_cmd {
+       struct iwl_mic_keys mic_keys;
+       struct iwl_p1k_cache tx;
+       struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+       struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+
+#define IWL_KCK_MAX_SIZE       32
+#define IWL_KEK_MAX_SIZE       32
+
+struct iwl_wowlan_kek_kck_material_cmd {
+       u8      kck[IWL_KCK_MAX_SIZE];
+       u8      kek[IWL_KEK_MAX_SIZE];
+       __le16  kck_len;
+       __le16  kek_len;
+       __le64  replay_ctr;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+
+#define RF_KILL_INDICATOR_FOR_WOWLAN   0x87
+
+enum iwl_wowlan_rekey_status {
+       IWL_WOWLAN_REKEY_POST_REKEY = 0,
+       IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
+}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
+
+enum iwl_wowlan_wakeup_reason {
+       IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS                       = 0,
+       IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET                       = BIT(0),
+       IWL_WOWLAN_WAKEUP_BY_PATTERN                            = BIT(1),
+       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON     = BIT(2),
+       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH            = BIT(3),
+       IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE                  = BIT(4),
+       IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED                  = BIT(5),
+       IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR                        = BIT(6),
+       IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST                      = BIT(7),
+       IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE                 = BIT(8),
+       IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS                 = BIT(9),
+       IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE           = BIT(10),
+       IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL              = BIT(11),
+       IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET             = BIT(12),
+}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
+
+struct iwl_wowlan_status {
+       __le64 replay_ctr;
+       __le16 pattern_number;
+       __le16 non_qos_seq_ctr;
+       __le16 qos_seq_ctr[8];
+       __le32 wakeup_reasons;
+       __le32 rekey_status;
+       __le32 num_of_gtk_rekeys;
+       __le32 transmitted_ndps;
+       __le32 received_beacons;
+       __le32 wake_packet_length;
+       __le32 wake_packet_bufsize;
+       u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+
+/* TODO: NetDetect API */
+
+#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
new file mode 100644 (file)
index 0000000..ae39b7d
--- /dev/null
@@ -0,0 +1,369 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_mac_h__
+#define __fw_api_mac_h__
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define MAC_INDEX_AUX          4
+#define MAC_INDEX_MIN_DRIVER   0
+#define NUM_MAC_INDEX_DRIVER   MAC_INDEX_AUX
+
+#define AC_NUM 4 /* Number of access categories */
+
+/**
+ * enum iwl_mac_protection_flags - MAC context flags
+ * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
+ *     this will require CCK RTS/CTS2self.
+ *     RTS/CTS will protect full burst time.
+ * @MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+enum iwl_mac_protection_flags {
+       MAC_PROT_FLG_TGG_PROTECT        = BIT(3),
+       MAC_PROT_FLG_HT_PROT            = BIT(23),
+       MAC_PROT_FLG_FAT_PROT           = BIT(24),
+       MAC_PROT_FLG_SELF_CTS_EN        = BIT(30),
+};
+
+#define MAC_FLG_SHORT_SLOT             BIT(4)
+#define MAC_FLG_SHORT_PREAMBLE         BIT(5)
+
+/**
+ * enum iwl_mac_types - Supported MAC types
+ * @FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @FW_MAC_TYPE_IBSS: IBSS
+ * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @FW_MAC_TYPE_P2P_STA: P2P client
+ * @FW_MAC_TYPE_GO: P2P GO
+ * @FW_MAC_TYPE_TEST: ?
+ * @FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+enum iwl_mac_types {
+       FW_MAC_TYPE_FIRST = 1,
+       FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
+       FW_MAC_TYPE_LISTENER,
+       FW_MAC_TYPE_PIBSS,
+       FW_MAC_TYPE_IBSS,
+       FW_MAC_TYPE_BSS_STA,
+       FW_MAC_TYPE_P2P_DEVICE,
+       FW_MAC_TYPE_P2P_STA,
+       FW_MAC_TYPE_GO,
+       FW_MAC_TYPE_TEST,
+       FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
+}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_tsf_id - TSF hw timer ID
+ * @TSF_ID_A: use TSF A
+ * @TSF_ID_B: use TSF B
+ * @TSF_ID_C: use TSF C
+ * @TSF_ID_D: use TSF D
+ * @NUM_TSF_IDS: number of TSF timers available
+ */
+enum iwl_tsf_id {
+       TSF_ID_A = 0,
+       TSF_ID_B = 1,
+       TSF_ID_C = 2,
+       TSF_ID_D = 3,
+       NUM_TSF_IDS = 4,
+}; /* TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ap {
+       __le32 beacon_time;
+       __le64 beacon_tsf;
+       __le32 bi;
+       __le32 bi_reciprocal;
+       __le32 dtim_interval;
+       __le32 dtim_reciprocal;
+       __le32 mcast_qid;
+       __le32 beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ */
+struct iwl_mac_data_ibss {
+       __le32 beacon_time;
+       __le64 beacon_tsf;
+       __le32 bi;
+       __le32 bi_reciprocal;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @bi_reciprocal: 2^32 / bi , applicable only when associated
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ */
+struct iwl_mac_data_sta {
+       __le32 is_assoc;
+       __le32 dtim_time;
+       __le64 dtim_tsf;
+       __le32 bi;
+       __le32 bi_reciprocal;
+       __le32 dtim_interval;
+       __le32 dtim_reciprocal;
+       __le32 listen_interval;
+       __le32 assoc_id;
+       __le32 assoc_beacon_arrive_time;
+} __packed; /* STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwl_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ *     0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicate that opportunistic PS allowed
+ */
+struct iwl_mac_data_go {
+       struct iwl_mac_data_ap ap;
+       __le32 ctwin;
+       __le32 opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwl_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ *     0 indicates that there is no CT window.
+ */
+struct iwl_mac_data_p2p_sta {
+       struct iwl_mac_data_sta sta;
+       __le32 ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwl_mac_data_pibss {
+       __le32 stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ *     other channels as well. This should be to true only in case that the
+ *     device is discoverable and there is an active GO. Note that setting this
+ *     field when not needed, will increase the number of interrupts and have
+ *     effect on the platform power, as this setting opens the Rx filters on
+ *     all macs.
+ */
+struct iwl_mac_data_p2p_dev {
+       __le32 is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwl_mac_filter_flags - MAC context filter flags
+ * @MAC_FILTER_IN_PROMISC: accept all data frames
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ *     control frames to the host
+ * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ *     (in station mode when associated)
+ * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+enum iwl_mac_filter_flags {
+       MAC_FILTER_IN_PROMISC           = BIT(0),
+       MAC_FILTER_IN_CONTROL_AND_MGMT  = BIT(1),
+       MAC_FILTER_ACCEPT_GRP           = BIT(2),
+       MAC_FILTER_DIS_DECRYPT          = BIT(3),
+       MAC_FILTER_DIS_GRP_DECRYPT      = BIT(4),
+       MAC_FILTER_IN_BEACON            = BIT(6),
+       MAC_FILTER_OUT_BCAST            = BIT(8),
+       MAC_FILTER_IN_CRC32             = BIT(11),
+       MAC_FILTER_IN_PROBE_REQUEST     = BIT(12),
+};
+
+/**
+ * enum iwl_mac_qos_flags - QoS flags
+ * @MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @MAC_QOS_FLG_TGN: HT is enabled
+ * @MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+enum iwl_mac_qos_flags {
+       MAC_QOS_FLG_UPDATE_EDCA = BIT(0),
+       MAC_QOS_FLG_TGN         = BIT(1),
+       MAC_QOS_FLG_TXOP_TYPE   = BIT(4),
+};
+
+/**
+ * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ *     Should be a power-of-2, minus 1.  Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ *     Should be a power-of-2, minus 1.  Device's default is 0x3f.
+ * @aifsn:  Number of slots in Arbitration Interframe Space (before
+ *     performing random backoff timing prior to Tx).  Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwl_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+       __le16 cw_min;
+       __le16 cw_max;
+       u8 aifsn;
+       u8 fifos_mask;
+       __le16 edca_txop;
+} __packed; /* AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @mac_type: one of FW_MAC_TYPE_*
+ * @tsf_id: TSF HW timer, one of TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of MAC_FILTER_*
+ * @qos_flags: from MAC_QOS_FLG_*
+ * @ac: one iwl_mac_qos configuration for each AC
+ * @mac_specific: one of struct iwl_mac_data_*, according to mac_type
+ */
+struct iwl_mac_ctx_cmd {
+       /* COMMON_INDEX_HDR_API_S_VER_1 */
+       __le32 id_and_color;
+       __le32 action;
+       /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+       __le32 mac_type;
+       __le32 tsf_id;
+       u8 node_addr[6];
+       __le16 reserved_for_node_addr;
+       u8 bssid_addr[6];
+       __le16 reserved_for_bssid_addr;
+       __le32 cck_rates;
+       __le32 ofdm_rates;
+       __le32 protection_flags;
+       __le32 cck_short_preamble;
+       __le32 short_slot;
+       __le32 filter_flags;
+       /* MAC_QOS_PARAM_API_S_VER_1 */
+       __le32 qos_flags;
+       struct iwl_ac_qos ac[AC_NUM+1];
+       /* MAC_CONTEXT_COMMON_DATA_API_S */
+       union {
+               struct iwl_mac_data_ap ap;
+               struct iwl_mac_data_go go;
+               struct iwl_mac_data_sta sta;
+               struct iwl_mac_data_p2p_sta p2p_sta;
+               struct iwl_mac_data_p2p_dev p2p_dev;
+               struct iwl_mac_data_pibss pibss;
+               struct iwl_mac_data_ibss ibss;
+       };
+} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
+
+static inline u32 iwl_mvm_reciprocal(u32 v)
+{
+       if (!v)
+               return 0;
+       return 0xFFFFFFFF / v;
+}
+
+#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
new file mode 100644 (file)
index 0000000..be36b76
--- /dev/null
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_power_h__
+#define __fw_api_power_h__
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ *             '1' Driver enables PM (use rest of parameters)
+ * @POWER_FLAGS_SLEEP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ *             '1' PM could sleep over DTIM till listen Interval.
+ * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ *             access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ *             PBW Snoozing enabled
+ * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+*/
+enum iwl_power_flags {
+       POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK    = BIT(0),
+       POWER_FLAGS_SLEEP_OVER_DTIM_MSK         = BIT(1),
+       POWER_FLAGS_LPRX_ENA_MSK                = BIT(2),
+       POWER_FLAGS_SNOOZE_ENA_MSK              = BIT(3),
+       POWER_FLAGS_BT_SCO_ENA                  = BIT(4),
+       POWER_FLAGS_ADVANCE_PM_ENA_MSK          = BIT(5)
+};
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @id_and_color:      MAC context identifier
+ * @action:            Action on context - no action, add new,
+ *                     modify existent, remove
+ * @flags:             Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ *                     Minimum allowed: 3 * DTIM
+ * @rx_data_timeout:    Minimum time (usec) from last Rx packet for AM to
+ *                     PSM transition - legacy PM
+ * @tx_data_timeout:    Minimum time (usec) from last Tx packet for AM to
+ *                     PSM transition - legacy PM
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ *                     PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ *                     PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ *                     Default: 80dbm
+ * @num_skip_dtim:      Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval:    TBD
+ * @snooze_window:      TBD
+ * @snooze_step:        TBD
+ * @qndp_tid:           TBD
+ * @uapsd_ac_flags:     TBD
+ * @uapsd_max_sp:       TBD
+ */
+struct iwl_powertable_cmd {
+       /* COMMON_INDEX_HDR_API_S_VER_1 */
+       __le32 id_and_color;
+       __le32 action;
+       __le16 flags;
+       u8 reserved;
+       __le16 keep_alive_seconds;
+       __le32 rx_data_timeout;
+       __le32 tx_data_timeout;
+       __le32 rx_data_timeout_uapsd;
+       __le32 tx_data_timeout_uapsd;
+       u8 lprx_rssi_threshold;
+       u8 num_skip_dtim;
+       __le16 snooze_interval;
+       __le16 snooze_window;
+       u8 snooze_step;
+       u8 qndp_tid;
+       u8 uapsd_ac_flags;
+       u8 uapsd_max_sp;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
new file mode 100644 (file)
index 0000000..aa3474d
--- /dev/null
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_rs_h__
+#define __fw_api_rs_h__
+
+#include "fw-api-mac.h"
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ */
+enum {
+       IWL_RATE_1M_INDEX = 0,
+       IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+       IWL_RATE_2M_INDEX,
+       IWL_RATE_5M_INDEX,
+       IWL_RATE_11M_INDEX,
+       IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+       IWL_RATE_6M_INDEX,
+       IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+       IWL_RATE_9M_INDEX,
+       IWL_RATE_12M_INDEX,
+       IWL_RATE_18M_INDEX,
+       IWL_RATE_24M_INDEX,
+       IWL_RATE_36M_INDEX,
+       IWL_RATE_48M_INDEX,
+       IWL_RATE_54M_INDEX,
+       IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
+       IWL_RATE_60M_INDEX,
+       IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+       IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
+       IWL_RATE_COUNT,
+};
+
+#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+enum {
+       IWL_RATE_6M_PLCP  = 13,
+       IWL_RATE_9M_PLCP  = 15,
+       IWL_RATE_12M_PLCP = 5,
+       IWL_RATE_18M_PLCP = 7,
+       IWL_RATE_24M_PLCP = 9,
+       IWL_RATE_36M_PLCP = 11,
+       IWL_RATE_48M_PLCP = 1,
+       IWL_RATE_54M_PLCP = 3,
+       IWL_RATE_1M_PLCP  = 10,
+       IWL_RATE_2M_PLCP  = 20,
+       IWL_RATE_5M_PLCP  = 55,
+       IWL_RATE_11M_PLCP = 110,
+};
+
+/*
+ * rate_n_flags bit fields
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ *     bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ *     bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ *     bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ *     bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define RATE_MCS_VHT_POS 26
+#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ *  2-0:  MCS rate base
+ *        0)   6 Mbps
+ *        1)  12 Mbps
+ *        2)  18 Mbps
+ *        3)  24 Mbps
+ *        4)  36 Mbps
+ *        5)  48 Mbps
+ *        6)  54 Mbps
+ *        7)  60 Mbps
+ *  4-3:  0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *  (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define RATE_HT_MCS_RATE_CODE_MSK      0x7
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_HT_MCS_GF_POS             10
+#define RATE_HT_MCS_GF_MSK             (1 << RATE_HT_MCS_GF_POS)
+
+#define RATE_HT_MCS_INDEX_MSK          0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ *  3-0:  VHT MCS (0-9)
+ *  5-4:  number of streams - 1:
+ *        0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define RATE_VHT_MCS_RATE_CODE_MSK     0xf
+#define RATE_VHT_MCS_NSS_POS           4
+#define RATE_VHT_MCS_NSS_MSK           (3 << RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ *  3-0:  0xD)   6 Mbps
+ *        0xF)   9 Mbps
+ *        0x5)  12 Mbps
+ *        0x7)  18 Mbps
+ *        0x9)  24 Mbps
+ *        0xB)  36 Mbps
+ *        0x1)  48 Mbps
+ *        0x3)  54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ *  6-0:   10)  1 Mbps
+ *         20)  2 Mbps
+ *         55)  5.5 Mbps
+ *        110)  11 Mbps
+ * (bit 7 is 0)
+ */
+#define RATE_LEGACY_RATE_MSK 0xff
+
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define RATE_MCS_CHAN_WIDTH_POS                11
+#define RATE_MCS_CHAN_WIDTH_MSK                (3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_20         (0 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_40         (1 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_80         (2 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_160                (3 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS               13
+#define RATE_MCS_SGI_MSK               (1 << RATE_MCS_SGI_POS)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define RATE_MCS_ANT_POS               14
+#define RATE_MCS_ANT_A_MSK             (1 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_B_MSK             (2 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_C_MSK             (4 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_AB_MSK            (RATE_MCS_ANT_A_MSK | \
+                                        RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK           (RATE_MCS_ANT_AB_MSK | \
+                                        RATE_MCS_ANT_C_MSK)
+#define RATE_MCS_ANT_MSK               RATE_MCS_ANT_ABC_MSK
+#define RATE_MCS_ANT_NUM 3
+
+/* Bit 17-18: (0) SS, (1) SS*2 */
+#define RATE_MCS_STBC_POS              17
+#define RATE_MCS_STBC_MSK              (1 << RATE_MCS_STBC_POS)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define RATE_MCS_BF_POS                        19
+#define RATE_MCS_BF_MSK                        (1 << RATE_MCS_BF_POS)
+
+/* Bit 20: (0) ZLF is off, (1) ZLF is on */
+#define RATE_MCS_ZLF_POS               20
+#define RATE_MCS_ZLF_MSK               (1 << RATE_MCS_ZLF_POS)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
+#define RATE_MCS_DUP_POS               24
+#define RATE_MCS_DUP_MSK               (3 << RATE_MCS_DUP_POS)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS              27
+#define RATE_MCS_LDPC_MSK              (1 << RATE_MCS_LDPC_POS)
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define  LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags, only this one is available */
+#define  LQ_FLAG_SET_STA_TLC_RTS_MSK   BIT(0)
+
+/**
+ * struct iwl_lq_cmd - link quality command
+ * @sta_id: station to update
+ * @control: not used
+ * @flags: combination of LQ_FLAG_*
+ * @mimo_delim: the first SISO index in rs_table, which separates MIMO
+ *     and SISO rates
+ * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
+ *     Should be ANT_[ABC]
+ * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
+ * @initial_rate_index: first index from rs_table per AC category
+ * @agg_time_limit: aggregation max time threshold in usec/100, meaning
+ *     value of 100 is one usec. Range is 100 to 8000
+ * @agg_disable_start_th: try-count threshold for starting aggregation.
+ *     If a frame has higher try-count, it should not be selected for
+ *     starting an aggregation sequence.
+ * @agg_frame_cnt_limit: max frame count in an aggregation.
+ *     0: no limit
+ *     1: no aggregation (one frame per aggregation)
+ *     2 - 0x3f: maximal number of frames (up to 3f == 63)
+ * @rs_table: array of rates for each TX try, each is rate_n_flags,
+ *     meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
+ * @bf_params: beam forming params, currently not used
+ */
+struct iwl_lq_cmd {
+       u8 sta_id;
+       u8 reserved1;
+       u16 control;
+       /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+       u8 flags;
+       u8 mimo_delim;
+       u8 single_stream_ant_msk;
+       u8 dual_stream_ant_msk;
+       u8 initial_rate_index[AC_NUM];
+       /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+       __le16 agg_time_limit;
+       u8 agg_disable_start_th;
+       u8 agg_frame_cnt_limit;
+       __le32 reserved2;
+       __le32 rs_table[LQ_MAX_RETRY_NUM];
+       __le32 bf_params;
+}; /* LINK_QUALITY_CMD_API_S_VER_1 */
+#endif /* __fw_api_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
new file mode 100644 (file)
index 0000000..670ac8f
--- /dev/null
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_scan_h__
+#define __fw_api_scan_h__
+
+#include "fw-api.h"
+
+/* Scan Commands, Responses, Notifications */
+
+/* Masks for iwl_scan_channel.type flags */
+#define SCAN_CHANNEL_TYPE_PASSIVE      0
+#define SCAN_CHANNEL_TYPE_ACTIVE       BIT(0)
+#define SCAN_CHANNEL_NARROW_BAND       BIT(22)
+
+/* Max number of IEs for direct SSID scans in a command */
+#define PROBE_OPTION_MAX               20
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * @channel: band is selected by iwl_scan_cmd "flags" field
+ * @tx_gain: gain for analog radio
+ * @dsp_atten: gain for DSP
+ * @active_dwell: dwell time for active scan in TU, typically 5-50
+ * @passive_dwell: dwell time for passive scan in TU, typically 20-500
+ * @type: type is broken down to these bits:
+ *     bit 0: 0 = passive, 1 = active
+ *     bits 1-20: SSID direct bit map. If any of these bits is set then
+ *             the corresponding SSID IE is transmitted in probe request
+ *             (bit i adds IE in position i to the probe request)
+ *     bit 22: channel width, 0 = regular, 1 = TGj narrow channel
+ *
+ * @iteration_count:
+ * @iteration_interval:
+ * This struct is used once for each channel in the scan list.
+ * Each channel can independently select:
+ * 1)  SSID for directed active scans
+ * 2)  Txpower setting (for rate specified within Tx command)
+ * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
+ *     quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1)  If using passive_dwell (i.e. passive_dwell != 0):
+ *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2)  quiet_time <= active_dwell
+ * 3)  If restricting off-channel time (i.e. max_out_time !=0):
+ *     passive_dwell < max_out_time
+ *     active_dwell < max_out_time
+ */
+struct iwl_scan_channel {
+       __le32 type;
+       __le16 channel;
+       __le16 iteration_count;
+       __le32 iteration_interval;
+       __le16 active_dwell;
+       __le16 passive_dwell;
+} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+       u8 id;
+       u8 len;
+       u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/**
+ * iwl_scan_flags - masks for scan command flags
+ *@SCAN_FLAGS_PERIODIC_SCAN:
+ *@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
+ *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
+ *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
+ *@SCAN_FLAGS_FRAGMENTED_SCAN:
+ */
+enum iwl_scan_flags {
+       SCAN_FLAGS_PERIODIC_SCAN                = BIT(0),
+       SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX   = BIT(1),
+       SCAN_FLAGS_DELAYED_SCAN_LOWBAND         = BIT(2),
+       SCAN_FLAGS_DELAYED_SCAN_HIGHBAND        = BIT(3),
+       SCAN_FLAGS_FRAGMENTED_SCAN              = BIT(4),
+};
+
+/**
+ * enum iwl_scan_type - Scan types for scan command
+ * @SCAN_TYPE_FORCED:
+ * @SCAN_TYPE_BACKGROUND:
+ * @SCAN_TYPE_OS:
+ * @SCAN_TYPE_ROAMING:
+ * @SCAN_TYPE_ACTION:
+ * @SCAN_TYPE_DISCOVERY:
+ * @SCAN_TYPE_DISCOVERY_FORCED:
+ */
+enum iwl_scan_type {
+       SCAN_TYPE_FORCED                = 0,
+       SCAN_TYPE_BACKGROUND            = 1,
+       SCAN_TYPE_OS                    = 2,
+       SCAN_TYPE_ROAMING               = 3,
+       SCAN_TYPE_ACTION                = 4,
+       SCAN_TYPE_DISCOVERY             = 5,
+       SCAN_TYPE_DISCOVERY_FORCED      = 6,
+}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
+
+/* Maximal number of channels to scan */
+#define MAX_NUM_SCAN_CHANNELS 0x24
+
+/**
+ * struct iwl_scan_cmd - scan request command
+ * ( SCAN_REQUEST_CMD = 0x80 )
+ * @len: command length in bytes
+ * @scan_flags: scan flags from SCAN_FLAGS_*
+ * @channel_count: num of channels in channel list (1 - MAX_NUM_SCAN_CHANNELS)
+ * @quiet_time: in msecs, dwell this time for active scan on quiet channels
+ * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
+ *     this number of packets were received (typically 1)
+ * @passive2active: is auto switching from passive to active allowed (0 or 1)
+ * @rxchain_sel_flags: RXON_RX_CHAIN_*
+ * @max_out_time: in usecs, max out of serving channel time
+ * @suspend_time: how long to pause scan when returning to service channel:
+ *     bits 0-19: beacon interval in usecs (suspend before executing)
+ *     bits 20-23: reserved
+ *     bits 24-31: number of beacons (suspend between channels)
+ * @rxon_flags: RXON_FLG_*
+ * @filter_flags: RXON_FILTER_*
+ * @tx_cmd: for active scans (zero for passive), w/o payload,
+ *     no RS so specify TX rate
+ * @direct_scan: direct scan SSIDs
+ * @type: one of SCAN_TYPE_*
+ * @repeats: how many time to repeat the scan
+ */
+struct iwl_scan_cmd {
+       __le16 len;
+       u8 scan_flags;
+       u8 channel_count;
+       __le16 quiet_time;
+       __le16 quiet_plcp_th;
+       __le16 passive2active;
+       __le16 rxchain_sel_flags;
+       __le32 max_out_time;
+       __le32 suspend_time;
+       /* RX_ON_FLAGS_API_S_VER_1 */
+       __le32 rxon_flags;
+       __le32 filter_flags;
+       struct iwl_tx_cmd tx_cmd;
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+       __le32 type;
+       __le32 repeats;
+
+       /*
+        * Probe request frame, followed by channel list.
+        *
+        * Size of probe request frame is specified by byte count in tx_cmd.
+        * Channel list follows immediately after probe request frame.
+        * Number of channels in list is specified by channel_count.
+        * Each channel in list is of type:
+        *
+        * struct iwl_scan_channel channels[0];
+        *
+        * NOTE:  Only one band of channels can be scanned per pass.  You
+        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * before requesting another scan.
+        */
+       u8 data[0];
+} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
+
+/* Response to scan request contains only status with one of these values */
+#define SCAN_RESPONSE_OK       0x1
+#define SCAN_RESPONSE_ERROR    0x2
+
+/*
+ * SCAN_ABORT_CMD = 0x81
+ * When scan abort is requested, the command has no fields except the common
+ * header. The response contains only a status with one of these values.
+ */
+#define SCAN_ABORT_POSSIBLE    0x1
+#define SCAN_ABORT_IGNORED     0x2 /* no pending scans */
+
+/* TODO: complete documentation */
+#define  SCAN_OWNER_STATUS 0x1
+#define  MEASURE_OWNER_STATUS 0x2
+
+/**
+ * struct iwl_scan_start_notif - notifies start of scan in the device
+ * ( SCAN_START_NOTIFICATION = 0x82 )
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @beacon_timer: structured as follows:
+ *     bits 0:19 - beacon interval in usecs
+ *     bits 20:23 - reserved (0)
+ *     bits 24:31 - number of beacons
+ * @channel: which channel is scanned
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @status: one of *_OWNER_STATUS
+ */
+struct iwl_scan_start_notif {
+       __le32 tsf_low;
+       __le32 tsf_high;
+       __le32 beacon_timer;
+       u8 channel;
+       u8 band;
+       u8 reserved[2];
+       __le32 status;
+} __packed; /* SCAN_START_NTF_API_S_VER_1 */
+
+/* scan results probe_status first bit indicates success */
+#define SCAN_PROBE_STATUS_OK           0
+#define SCAN_PROBE_STATUS_TX_FAILED    BIT(0)
+/* error statuses combined with TX_FAILED */
+#define SCAN_PROBE_STATUS_FAIL_TTL     BIT(1)
+#define SCAN_PROBE_STATUS_FAIL_BT      BIT(2)
+
+/* How many statistics are gathered for each channel */
+#define SCAN_RESULTS_STATISTICS 1
+
+/**
+ * enum iwl_scan_complete_status - status codes for scan complete notifications
+ * @SCAN_COMP_STATUS_OK:  scan completed successfully
+ * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
+ * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
+ * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
+ * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
+ * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
+ * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
+ * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
+ * @SCAN_COMP_STATUS_ERR_COEX: medium was lost to WiMax
+ * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
+ *     (not an error!)
+ * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
+ *     asked for
+ * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
+*/
+enum iwl_scan_complete_status {
+       SCAN_COMP_STATUS_OK = 0x1,
+       SCAN_COMP_STATUS_ABORT = 0x2,
+       SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
+       SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
+       SCAN_COMP_STATUS_ERR_PROBE = 0x5,
+       SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
+       SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
+       SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
+       SCAN_COMP_STATUS_ERR_COEX = 0x9,
+       SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
+       SCAN_COMP_STATUS_ITERATION_END = 0x0B,
+       SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
+};
+
+/**
+ * struct iwl_scan_results_notif - scan results for one channel
+ * ( SCAN_RESULTS_NOTIFICATION = 0x83 )
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of request that weren't sent due to not enough time
+ * @duration: duration spent in channel, in usecs
+ * @statistics: statistics gathered for this channel
+ */
+struct iwl_scan_results_notif {
+       u8 channel;
+       u8 band;
+       u8 probe_status;
+       u8 num_probe_not_sent;
+       __le32 duration;
+       __le32 statistics[SCAN_RESULTS_STATISTICS];
+} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */
+
+/**
+ * struct iwl_scan_complete_notif - notifies end of scanning (all channels)
+ * ( SCAN_COMPLETE_NOTIFICATION = 0x84 )
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: all scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_scan_complete_notif {
+       u8 scanned_channels;
+       u8 status;
+       u8 bt_status;
+       u8 last_channel;
+       __le32 tsf_low;
+       __le32 tsf_high;
+       struct iwl_scan_results_notif results[MAX_NUM_SCAN_CHANNELS];
+} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
+
+/* scan offload */
+#define IWL_MAX_SCAN_CHANNELS          40
+#define IWL_SCAN_MAX_BLACKLIST_LEN     64
+#define IWL_SCAN_MAX_PROFILES          11
+#define SCAN_OFFLOAD_PROBE_REQ_SIZE    512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define CAN_ABORT_STATUS 1
+
+#define IWL_FULL_SCAN_MULTIPLIER 5
+#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+
+/**
+ * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
+ * @scan_flags:                see enum iwl_scan_flags
+ * @channel_count:     channels in channel list
+ * @quiet_time:                dwell time, in milliseconds, on quiet channel
+ * @quiet_plcp_th:     quiet channel num of packets threshold
+ * @good_CRC_th:       passive to active promotion threshold
+ * @rx_chain:          RXON rx chain.
+ * @max_out_time:      max uSec to be out of associated channel
+ * @suspend_time:      pause scan this long when returning to service channel
+ * @flags:             RXON flags
+ * @filter_flags:      RXON filter
+ * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
+ * @direct_scan:       list of SSIDs for directed active scan
+ * @scan_type:         see enum iwl_scan_type.
+ * @rep_count:         repetition count for each scheduled scan iteration.
+ */
+struct iwl_scan_offload_cmd {
+       __le16 len;
+       u8 scan_flags;
+       u8 channel_count;
+       __le16 quiet_time;
+       __le16 quiet_plcp_th;
+       __le16 good_CRC_th;
+       __le16 rx_chain;
+       __le32 max_out_time;
+       __le32 suspend_time;
+       /* RX_ON_FLAGS_API_S_VER_1 */
+       __le32 flags;
+       __le32 filter_flags;
+       struct iwl_tx_cmd tx_cmd[2];
+       /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+       __le32 scan_type;
+       __le32 rep_count;
+} __packed;
+
+enum iwl_scan_offload_channel_flags {
+       IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE         = BIT(0),
+       IWL_SCAN_OFFLOAD_CHANNEL_NARROW         = BIT(22),
+       IWL_SCAN_OFFLOAD_CHANNEL_FULL           = BIT(24),
+       IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL        = BIT(25),
+};
+
+/**
+ * iwl_scan_channel_cfg - SCAN_CHANNEL_CFG_S
+ * @type:              bitmap - see enum iwl_scan_offload_channel_flags.
+ *                     0:      passive (0) or active (1) scan.
+ *                     1-20:   directed scan to i'th ssid.
+ *                     22:     channel width configuration - 1 for narrow.
+ *                     24:     full scan.
+ *                     25:     partial scan.
+ * @channel_number:    channel number 1-13 etc.
+ * @iter_count:                repetition count for the channel.
+ * @iter_interval:     interval between two iterations on one channel.
+ * @dwell_time:        entry 0 - active scan, entry 1 - passive scan.
+ */
+struct iwl_scan_channel_cfg {
+       __le32 type[IWL_MAX_SCAN_CHANNELS];
+       __le16 channel_number[IWL_MAX_SCAN_CHANNELS];
+       __le16 iter_count[IWL_MAX_SCAN_CHANNELS];
+       __le32 iter_interval[IWL_MAX_SCAN_CHANNELS];
+       u8 dwell_time[IWL_MAX_SCAN_CHANNELS][2];
+} __packed;
+
+/**
+ * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
+ * @scan_cmd:          scan command fixed part
+ * @channel_cfg:       scan channel configuration
+ * @data:              probe request frames (one per band)
+ */
+struct iwl_scan_offload_cfg {
+       struct iwl_scan_offload_cmd scan_cmd;
+       struct iwl_scan_channel_cfg channel_cfg;
+       u8 data[0];
+} __packed;
+
+/**
+ * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid:              MAC address to filter out
+ * @reported_rssi:     AP rssi reported to the host
+ */
+struct iwl_scan_offload_blacklist {
+       u8 ssid[ETH_ALEN];
+       u8 reported_rssi;
+       u8 reserved;
+} __packed;
+
+enum iwl_scan_offload_network_type {
+       IWL_NETWORK_TYPE_BSS    = 1,
+       IWL_NETWORK_TYPE_IBSS   = 2,
+       IWL_NETWORK_TYPE_ANY    = 3,
+};
+
+enum iwl_scan_offload_band_selection {
+       IWL_SCAN_OFFLOAD_SELECT_2_4     = 0x4,
+       IWL_SCAN_OFFLOAD_SELECT_5_2     = 0x8,
+       IWL_SCAN_OFFLOAD_SELECT_ANY     = 0xc,
+};
+
+/**
+ * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index:                index to ssid list in fixed part
+ * @unicast_cipher:    encryption algorithm to match - bitmap
+ * @auth_alg:          authentication algorithm to match - bitmap
+ * @network_type:      enum iwl_scan_offload_network_type
+ * @band_selection:    enum iwl_scan_offload_band_selection
+ */
+struct iwl_scan_offload_profile {
+       u8 ssid_index;
+       u8 unicast_cipher;
+       u8 auth_alg;
+       u8 network_type;
+       u8 band_selection;
+       u8 reserved[3];
+} __packed;
+
+/**
+ * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist:         AP list to filter off from scan results
+ * @profiles:          profiles to search for match
+ * @blacklist_len:     length of blacklist
+ * @num_profiles:      num of profiles in the list
+ */
+struct iwl_scan_offload_profile_cfg {
+       struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
+       struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+       u8 blacklist_len;
+       u8 num_profiles;
+       u8 reserved[2];
+} __packed;
+
+/**
+ * iwl_scan_offload_schedule - schedule of scan offload
+ * @delay:             delay between iterations, in seconds.
+ * @iterations:                num of scan iterations
+ * @full_scan_mul:     number of partial scans before each full scan
+ */
+struct iwl_scan_offload_schedule {
+       u16 delay;
+       u8 iterations;
+       u8 full_scan_mul;
+} __packed;
+
+/*
+ * iwl_scan_offload_flags
+ *
+ * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
+ *     ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
+ * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
+ *     on A band.
+ */
+enum iwl_scan_offload_flags {
+       IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID       = BIT(0),
+       IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL    = BIT(2),
+       IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN       = BIT(3),
+};
+
+/**
+ * iwl_scan_offload_req - scan offload request command
+ * @flags:             bitmap - enum iwl_scan_offload_flags.
+ * @watchdog:          maximum scan duration in TU.
+ * @delay:             delay in seconds before first iteration.
+ * @schedule_line:     scan offload schedule, for fast and regular scan.
+ */
+struct iwl_scan_offload_req {
+       __le16 flags;
+       __le16 watchdog;
+       __le16 delay;
+       __le16 reserved;
+       struct iwl_scan_offload_schedule schedule_line[2];
+} __packed;
+
+enum iwl_scan_offload_compleate_status {
+       IWL_SCAN_OFFLOAD_COMPLETED      = 1,
+       IWL_SCAN_OFFLOAD_ABORTED        = 2,
+};
+
+/**
+ * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
+ * @last_schedule_line:                last schedule line executed (fast or regular)
+ * @last_schedule_iteration:   last scan iteration executed before scan abort
+ * @status:                    enum iwl_scan_offload_compleate_status
+ */
+struct iwl_scan_offload_complete {
+       u8 last_schedule_line;
+       u8 last_schedule_iteration;
+       u8 status;
+       u8 reserved;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
new file mode 100644 (file)
index 0000000..0acb53d
--- /dev/null
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_sta_h__
+#define __fw_api_sta_h__
+
+/**
+ * enum iwl_sta_flags - flags for the ADD_STA host command
+ * @STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @STA_FLG_REDUCED_TX_PWR_DATA:
+ * @STA_FLG_FLG_ANT_MSK: Antenna selection
+ * @STA_FLG_PS: set if STA is in Power Save
+ * @STA_FLG_INVALID: set if STA is invalid
+ * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @STA_FLG_DRAIN_FLOW: drain flow
+ * @STA_FLG_PAN: STA is for PAN interface
+ * @STA_FLG_CLASS_AUTH:
+ * @STA_FLG_CLASS_ASSOC:
+ * @STA_FLG_RTS_MIMO_PROT:
+ * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ *     initialised by driver and can be updated by fw upon reception of
+ *     action frames that can change the channel width. When cleared the fw
+ *     will send all the frames in 20MHz even when FAT channel is requested.
+ * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ *     driver and can be updated by fw upon reception of action frames.
+ * @STA_FLG_MFP_EN: Management Frame Protection
+ */
+enum iwl_sta_flags {
+       STA_FLG_REDUCED_TX_PWR_CTRL     = BIT(3),
+       STA_FLG_REDUCED_TX_PWR_DATA     = BIT(6),
+
+       STA_FLG_FLG_ANT_A               = (1 << 4),
+       STA_FLG_FLG_ANT_B               = (2 << 4),
+       STA_FLG_FLG_ANT_MSK             = (STA_FLG_FLG_ANT_A |
+                                          STA_FLG_FLG_ANT_B), /* 2-bit antenna selection field */
+
+       STA_FLG_PS                      = BIT(8),
+       STA_FLG_INVALID                 = BIT(9),
+       STA_FLG_DLP_EN                  = BIT(10),
+       STA_FLG_SET_ALL_KEYS            = BIT(11),
+       STA_FLG_DRAIN_FLOW              = BIT(12),
+       STA_FLG_PAN                     = BIT(13),
+       STA_FLG_CLASS_AUTH              = BIT(14),
+       STA_FLG_CLASS_ASSOC             = BIT(15),
+       STA_FLG_RTS_MIMO_PROT           = BIT(17),
+
+       STA_FLG_MAX_AGG_SIZE_SHIFT      = 19,
+       STA_FLG_MAX_AGG_SIZE_8K         = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_16K        = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_32K        = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_64K        = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_128K       = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_256K       = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_512K       = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_1024K      = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+       STA_FLG_MAX_AGG_SIZE_MSK        = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), /* 3-bit max A-MPDU size field */
+
+       STA_FLG_AGG_MPDU_DENS_SHIFT     = 23,
+       STA_FLG_AGG_MPDU_DENS_2US       = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), /* values 0-3 unused */
+       STA_FLG_AGG_MPDU_DENS_4US       = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+       STA_FLG_AGG_MPDU_DENS_8US       = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+       STA_FLG_AGG_MPDU_DENS_16US      = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+       STA_FLG_AGG_MPDU_DENS_MSK       = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), /* 3-bit MPDU density field */
+
+       STA_FLG_FAT_EN_20MHZ            = (0 << 26),
+       STA_FLG_FAT_EN_40MHZ            = (1 << 26),
+       STA_FLG_FAT_EN_80MHZ            = (2 << 26),
+       STA_FLG_FAT_EN_160MHZ           = (3 << 26),
+       STA_FLG_FAT_EN_MSK              = (3 << 26), /* 2-bit Tx channel width field */
+
+       STA_FLG_MIMO_EN_SISO            = (0 << 28),
+       STA_FLG_MIMO_EN_MIMO2           = (1 << 28),
+       STA_FLG_MIMO_EN_MIMO3           = (2 << 28),
+       STA_FLG_MIMO_EN_MSK             = (3 << 28), /* 2-bit MIMO mode field */
+};
+
+/**
+ * enum iwl_sta_key_flag - key flags for the ADD_STA host command
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ *     station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @STA_KEY_NOT_VALID: key is invalid
+ * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @STA_KEY_MULTICAST: set for multicast key
+ * @STA_KEY_MFP: key is used for Management Frame Protection
+ */
+enum iwl_sta_key_flag {
+       STA_KEY_FLG_NO_ENC              = (0 << 0),     /* 3-bit cipher type field */
+       STA_KEY_FLG_WEP                 = (1 << 0),
+       STA_KEY_FLG_CCM                 = (2 << 0),
+       STA_KEY_FLG_TKIP                = (3 << 0),
+       STA_KEY_FLG_CMAC                = (6 << 0),     /* note: values 4-5 unused */
+       STA_KEY_FLG_ENC_UNKNOWN         = (7 << 0),
+       STA_KEY_FLG_EN_MSK              = (7 << 0),     /* mask for the cipher type field */
+
+       STA_KEY_FLG_WEP_KEY_MAP         = BIT(3),
+       STA_KEY_FLG_KEYID_POS            = 8,
+       STA_KEY_FLG_KEYID_MSK           = (3 << STA_KEY_FLG_KEYID_POS), /* 2-bit key index */
+       STA_KEY_NOT_VALID               = BIT(11),
+       STA_KEY_FLG_WEP_13BYTES         = BIT(12),      /* 13-byte WEP key */
+       STA_KEY_MULTICAST               = BIT(14),      /* multicast key */
+       STA_KEY_MFP                     = BIT(15),      /* Management Frame Protection */
+};
+
+/**
+ * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
+ * @STA_MODIFY_KEY: this command modifies %key
+ * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @STA_MODIFY_TX_RATE: unused
+ * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @STA_MODIFY_PROT_TH:
+ * @STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+enum iwl_sta_modify_flag {
+       STA_MODIFY_KEY                          = BIT(0),       /* modifies %key */
+       STA_MODIFY_TID_DISABLE_TX               = BIT(1),       /* modifies %tid_disable_tx */
+       STA_MODIFY_TX_RATE                      = BIT(2),       /* unused */
+       STA_MODIFY_ADD_BA_TID                   = BIT(3),       /* modifies %add_immediate_ba_tid */
+       STA_MODIFY_REMOVE_BA_TID                = BIT(4),       /* modifies %remove_immediate_ba_tid */
+       STA_MODIFY_SLEEPING_STA_TX_COUNT        = BIT(5),       /* modifies %sleep_tx_count */
+       STA_MODIFY_PROT_TH                      = BIT(6),
+       STA_MODIFY_QUEUES                       = BIT(7),       /* modifies the queues used by this station */
+};
+
+#define STA_MODE_MODIFY        1
+
+/**
+ * enum iwl_sta_sleep_flag - type of sleep of the station
+ * @STA_SLEEP_STATE_AWAKE:
+ * @STA_SLEEP_STATE_PS_POLL:
+ * @STA_SLEEP_STATE_UAPSD:
+ */
+enum iwl_sta_sleep_flag {      /* station sleep state, carried in sleep_state_flags */
+       STA_SLEEP_STATE_AWAKE   = 0,
+       STA_SLEEP_STATE_PS_POLL = BIT(0),
+       STA_SLEEP_STATE_UAPSD   = BIT(1),
+};
+
+/* STA ID and color bits definitions */
+#define STA_ID_SEED            (0x0f)
+#define STA_ID_POS             (0)
+#define STA_ID_MSK             (STA_ID_SEED << STA_ID_POS)
+
+#define STA_COLOR_SEED         (0x7)
+#define STA_COLOR_POS          (4)
+#define STA_COLOR_MSK          (STA_COLOR_SEED << STA_COLOR_POS)
+
+#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+       (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
+#define STA_ID_N_COLOR_GET_ID(id_n_color)    \
+       (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
+
+#define STA_KEY_MAX_NUM (16)
+#define STA_KEY_IDX_INVALID (0xff)
+#define STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWL_MAX_GLOBAL_KEYS (4)
+#define STA_KEY_LEN_WEP40 (5)
+#define STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwl_mvm_keyinfo - key information
+ * @key_flags: type %iwl_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwl_mvm_keyinfo {
+       __le16 key_flags;               /* enum iwl_sta_key_flag */
+       u8 tkip_rx_tsc_byte2;           /* TSC[2] for key mix ph1 detection */
+       u8 reserved1;
+       __le16 tkip_rx_ttak[5];         /* 10-byte unicast TKIP TTAK for Rx */
+       u8 key_offset;                  /* key offset in the fw's key table */
+       u8 reserved2;
+       u8 key[16];                     /* 16-byte unicast decryption key */
+       __le64 tx_secur_seq_cnt;        /* initial RSC / PN for replay check */
+       __le64 hw_tkip_mic_rx_key;      /* MIC Rx key - TKIP only */
+       __le64 hw_tkip_mic_tx_key;      /* MIC Tx key - TKIP only */
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
+ * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
+ *     sent
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ *     alone. 1 - modify, 0 - don't change.
+ * @key: look at %iwl_mvm_keyinfo
+ * @station_flags: look at %iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ *     Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ *     add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ *     Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ *     add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ *     asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ *     keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ *     mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd {
+       u8 add_modify;                  /* 1: modify existing, 0: add new station */
+       u8 unicast_tx_key_id;
+       u8 multicast_tx_key_id;
+       u8 reserved1;
+       __le32 mac_id_n_color;          /* MAC context this station belongs to */
+       u8 addr[ETH_ALEN];              /* station's MAC address */
+       __le16 reserved2;
+       u8 sta_id;                      /* index in uCode's station table */
+       u8 modify_mask;                 /* STA_MODIFY_* bits: 1 - modify, 0 - don't change */
+       __le16 reserved3;
+       struct iwl_mvm_keyinfo key;
+       __le32 station_flags;           /* enum iwl_sta_flags */
+       __le32 station_flags_msk;       /* which of station_flags have changed */
+       __le16 tid_disable_tx;          /* clear BIT(x) to enable AMPDU for tid x */
+       __le16 reserved4;
+       u8 add_immediate_ba_tid;        /* needs STA_MODIFY_ADD_BA_TID set */
+       u8 remove_immediate_ba_tid;     /* needs STA_MODIFY_REMOVE_BA_TID set */
+       __le16 add_immediate_ba_ssn;    /* used together with add_immediate_ba_tid */
+       __le16 sleep_tx_count;          /* packets to tx to station even though asleep */
+       __le16 sleep_state_flags;       /* enum iwl_sta_sleep_flag */
+       __le16 assoc_id;                /* 9-bit assoc_id sent in VHT PLCP */
+       __le16 beamform_flags;          /* beam forming controls */
+       __le32 tfd_queue_msk;           /* tfd queues used by this station */
+} __packed; /* ADD_STA_CMD_API_S_VER_5 */
+
+/**
+ * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
+ * @ADD_STA_SUCCESS: operation was executed successfully
+ * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
+ *     doesn't exist.
+ */
+enum iwl_mvm_add_sta_rsp_status {
+       ADD_STA_SUCCESS                 = 0x1,  /* operation executed successfully */
+       ADD_STA_STATIONS_OVERLOAD       = 0x2,  /* no room left in fw's station table */
+       ADD_STA_IMMEDIATE_BA_FAILURE    = 0x4,  /* can't add Rx block ack session */
+       ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,  /* tried to modify a non-existent station */
+};
+
+/**
+ * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table
+ * ( REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwl_mvm_rm_sta_cmd {
+       u8 sta_id;      /* station id of the station to be removed */
+       u8 reserved[3];
+} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd {
+       __le32 ctrl_flags;      /* enum iwl_sta_key_flag */
+       u8 IGTK[16];
+       u8 K1[16];              /* IGTK master key */
+       u8 K2[16];              /* IGTK sub key */
+       __le32 key_id;
+       __le32 sta_id;          /* station ID that supports IGTK */
+       __le64 receive_seq_cnt; /* initial RSC/PN needed for replay check */
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/* single WEP key entry for iwl_mvm_wep_key_cmd */
+struct iwl_mvm_wep_key {
+       u8 key_index;
+       u8 key_offset;          /* presumably offset in fw key table, as in iwl_mvm_keyinfo - TODO confirm */
+       __le16 reserved1;
+       u8 key_size;            /* presumably STA_KEY_LEN_WEP40 (5) or STA_KEY_LEN_WEP104 (13) - TODO confirm */
+       u8 reserved2[3];
+       u8 key[16];
+} __packed;
+
+struct iwl_mvm_wep_key_cmd {
+       __le32 mac_id_n_color;  /* MAC context id and color */
+       u8 num_keys;            /* number of entries in wep_key[] */
+       u8 decryption_type;
+       u8 flags;
+       u8 reserved;
+       struct iwl_mvm_wep_key wep_key[0];      /* variable-length key array */
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+
+#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
new file mode 100644 (file)
index 0000000..2677914
--- /dev/null
@@ -0,0 +1,580 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_tx_h__
+#define __fw_api_tx_h__
+
+/**
+ * enum iwl_tx_flags - bitmasks for tx_flags in TX command
+ * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ *     Otherwise, use rate_n_flags from the TX command
+ * @TX_CMD_FLG_BA: this frame is a block ack
+ * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ *     Must set TX_CMD_FLG_ACK with this flag.
+ * @TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection
+ * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
+ * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
+ * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ *     Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame
+ * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ *     Should be set for beacons and probe responses
+ * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @TX_CMD_FLG_AGG_START: allow this frame to start aggregation
+ * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ *     Should be set for 26/30 length MAC headers
+ * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
+ * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
+ * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+enum iwl_tx_flags {    /* note: bits 1-2, 11 and 29-30 are unused */
+       TX_CMD_FLG_PROT_REQUIRE         = BIT(0),
+       TX_CMD_FLG_ACK                  = BIT(3),
+       TX_CMD_FLG_STA_RATE             = BIT(4),
+       TX_CMD_FLG_BA                   = BIT(5),
+       TX_CMD_FLG_BAR                  = BIT(6),       /* must be set together with TX_CMD_FLG_ACK */
+       TX_CMD_FLG_TXOP_PROT            = BIT(7),
+       TX_CMD_FLG_VHT_NDPA             = BIT(8),
+       TX_CMD_FLG_HT_NDPA              = BIT(9),
+       TX_CMD_FLG_CSI_FDBK2HOST        = BIT(10),
+       TX_CMD_FLG_BT_DIS               = BIT(12),
+       TX_CMD_FLG_SEQ_CTL              = BIT(13),
+       TX_CMD_FLG_MORE_FRAG            = BIT(14),
+       TX_CMD_FLG_NEXT_FRAME           = BIT(15),
+       TX_CMD_FLG_TSF                  = BIT(16),      /* fw inserts TSF; beacons/probe responses */
+       TX_CMD_FLG_CALIB                = BIT(17),
+       TX_CMD_FLG_KEEP_SEQ_CTL         = BIT(18),
+       TX_CMD_FLG_AGG_START            = BIT(19),
+       TX_CMD_FLG_MH_PAD               = BIT(20),      /* 2-byte pad after 26/30-byte MAC header */
+       TX_CMD_FLG_RESP_TO_DRV          = BIT(21),
+       TX_CMD_FLG_CCMP_AGG             = BIT(22),
+       TX_CMD_FLG_TKIP_MIC_DONE        = BIT(23),
+       TX_CMD_FLG_CTS_ONLY             = BIT(24),
+       TX_CMD_FLG_DUR                  = BIT(25),
+       TX_CMD_FLG_FW_DROP              = BIT(26),
+       TX_CMD_FLG_EXEC_PAPD            = BIT(27),
+       TX_CMD_FLG_PAPD_TYPE            = BIT(28),      /* 0: reference power, 1: nominal power */
+       TX_CMD_FLG_HCCA_CHUNK           = BIT(31)
+}; /* TX_FLAGS_BITS_API_S_VER_1 */
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP                 0x01
+#define TX_CMD_SEC_CCM                 0x02
+#define TX_CMD_SEC_TKIP                        0x03
+#define TX_CMD_SEC_WEP_KEY_IDX_POS     6
+#define TX_CMD_SEC_WEP_KEY_IDX_MSK     0xc0
+#define TX_CMD_SEC_KEY128              0x08
+
+/* TODO: how can these values be OK with only a 16 bit variable??? */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define TX_CMD_NEXT_FRAME_ACK_MSK              (0x8)
+#define TX_CMD_NEXT_FRAME_STA_RATE_MSK         (0x10)
+#define TX_CMD_NEXT_FRAME_BA_MSK               (0x20)
+#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK       (0x40)
+#define TX_CMD_NEXT_FRAME_FLAGS_MSK            (0xf8)
+#define TX_CMD_NEXT_FRAME_STA_ID_MSK           (0xff00)
+#define TX_CMD_NEXT_FRAME_STA_ID_POS           (8)
+#define TX_CMD_NEXT_FRAME_RATE_MSK             (0xffff0000)
+#define TX_CMD_NEXT_FRAME_RATE_POS             (16)
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define TX_CMD_LIFE_TIME_INFINITE      0xFFFFFFFF
+#define TX_CMD_LIFE_TIME_DEFAULT       2000000 /* 2000 ms*/
+#define TX_CMD_LIFE_TIME_PROBE_RESP    40000 /* 40 ms */
+#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWL_TID_NON_QOS        IWL_MAX_TID_COUNT
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWL_DEFAULT_TX_RETRY                   15
+#define IWL_MGMT_DFAULT_RETRY_LIMIT            3
+#define IWL_RTS_DFAULT_RETRY_LIMIT             60
+#define IWL_BAR_DFAULT_RETRY_LIMIT             60
+#define IWL_LOW_RETRY_LIMIT                    7
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwl_tx_cmd - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @next_frame_len: same as len, but for next frame (0 if not applicable)
+ *     Used for fragmentation and bursting, but not in 11n aggregation.
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *     cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for initial TX attempt.
+ *     Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @key: security key
+ * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
+ * @life_time: frame life time (usecs??)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ *     btkill_cnd + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_tspec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @driver_txop: duration of EDCA TXOP, in 32-usec units. Set this if not
+ *     specified by HCCA protocol
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwl_tx_cmd {
+       __le16 len;             /* payload length in bytes, includes MAC header */
+       __le16 next_frame_len;  /* same as len, for the next frame (0 if N/A) */
+       __le32 tx_flags;        /* combination of TX_CMD_FLG_* */
+       /* DRAM_SCRATCH_API_U_VER_1 */
+       u8 try_cnt;
+       u8 btkill_cnt;
+       __le16 reserved;
+       __le32 rate_n_flags;    /* RATE_MCS_*; used when TX_CMD_FLG_STA_RATE is cleared */
+       u8 sta_id;              /* destination station in FW station table */
+       u8 sec_ctl;             /* TX_CMD_SEC_* */
+       u8 initial_rate_index;  /* rate table index for initial attempt */
+       u8 reserved2;
+       u8 key[16];             /* security key */
+       __le16 next_frame_flags;        /* TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_* */
+       __le16 reserved3;
+       __le32 life_time;       /* frame life time; TX_CMD_LIFE_TIME_* */
+       __le32 dram_lsb_ptr;    /* scratch area physical address, low 32 bits; 0 disables */
+       u8 dram_msb_ptr;        /* upper bits of the scratch physical address */
+       u8 rts_retry_limit;     /* max RTS attempts */
+       u8 data_retry_limit;    /* max data packet attempts */
+       u8 tid_tspec;           /* TID/tspec; IWL_TID_NON_QOS for non-QoS */
+       __le16 pm_frame_timeout;        /* PM TX frame timeout */
+       __le16 driver_txop;     /* EDCA TXOP duration, 32-usec units */
+       u8 payload[0];          /* overlays hdr[]: MAC header + padding + payload follow */
+       struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_3 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
+ * @TX_STATUS_SUCCESS:
+ * @TX_STATUS_DIRECT_DONE:
+ * @TX_STATUS_POSTPONE_DELAY:
+ * @TX_STATUS_POSTPONE_FEW_BYTES:
+ * @TX_STATUS_POSTPONE_BT_PRIO:
+ * @TX_STATUS_POSTPONE_QUIET_PERIOD:
+ * @TX_STATUS_POSTPONE_CALC_TTAK:
+ * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ * @TX_STATUS_FAIL_SHORT_LIMIT:
+ * @TX_STATUS_FAIL_LONG_LIMIT:
+ * @TX_STATUS_FAIL_UNDERRUN:
+ * @TX_STATUS_FAIL_DRAIN_FLOW:
+ * @TX_STATUS_FAIL_RFKILL_FLUSH:
+ * @TX_STATUS_FAIL_LIFE_EXPIRE:
+ * @TX_STATUS_FAIL_DEST_PS:
+ * @TX_STATUS_FAIL_HOST_ABORTED:
+ * @TX_STATUS_FAIL_BT_RETRY:
+ * @TX_STATUS_FAIL_STA_INVALID:
+ * @TX_STATUS_FAIL_FRAG_DROPPED:
+ * @TX_STATUS_FAIL_TID_DISABLE:
+ * @TX_STATUS_FAIL_FIFO_FLUSHED:
+ * @TX_STATUS_FAIL_SMALL_CF_POLL:
+ * @TX_STATUS_FAIL_FW_DROP:
+ * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ *     STA table
+ * @TX_STATUS_INTERNAL_ABORT:
+ * @TX_MODE_MSK:
+ * @TX_MODE_NO_BURST:
+ * @TX_MODE_IN_BURST_SEQ:
+ * @TX_MODE_FIRST_IN_BURST:
+ * @TX_QUEUE_NUM_MSK:
+ *
+ * Valid only if frame_count == 1
+ * TODO: complete documentation
+ */
+enum iwl_tx_status {
+       TX_STATUS_MSK = 0x000000ff,     /* low byte carries the per-frame status */
+       TX_STATUS_SUCCESS = 0x01,
+       TX_STATUS_DIRECT_DONE = 0x02,
+       /* postpone TX */
+       TX_STATUS_POSTPONE_DELAY = 0x40,
+       TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+       TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+       TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+       TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+       /* abort TX */
+       TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+       TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+       TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+       TX_STATUS_FAIL_UNDERRUN = 0x84,
+       TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+       TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+       TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+       TX_STATUS_FAIL_DEST_PS = 0x88,
+       TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+       TX_STATUS_FAIL_BT_RETRY = 0x8a,
+       TX_STATUS_FAIL_STA_INVALID = 0x8b,
+       TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+       TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+       TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+       TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+       TX_STATUS_FAIL_FW_DROP = 0x90,
+       TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,       /* Tx cmd color != STA table color */
+       TX_STATUS_INTERNAL_ABORT = 0x92,
+       TX_MODE_MSK = 0x00000f00,       /* 4-bit burst mode field */
+       TX_MODE_NO_BURST = 0x00000000,
+       TX_MODE_IN_BURST_SEQ = 0x00000100,
+       TX_MODE_FIRST_IN_BURST = 0x00000200,
+       TX_QUEUE_NUM_MSK = 0x0001f000,
+       TX_NARROW_BW_MSK = 0x00060000,  /* 2-bit narrow-bandwidth field */
+       TX_NARROW_BW_1DIV2 = 0x00020000,
+       TX_NARROW_BW_1DIV4 = 0x00040000,
+       TX_NARROW_BW_1DIV8 = 0x00060000,
+};
+
+/*
+ * enum iwl_tx_agg_status - TX aggregation status
+ * @AGG_TX_STATE_STATUS_MSK:
+ * @AGG_TX_STATE_TRANSMITTED:
+ * @AGG_TX_STATE_UNDERRUN:
+ * @AGG_TX_STATE_BT_PRIO:
+ * @AGG_TX_STATE_FEW_BYTES:
+ * @AGG_TX_STATE_ABORT:
+ * @AGG_TX_STATE_LAST_SENT_TTL:
+ * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
+ * @AGG_TX_STATE_LAST_SENT_BT_KILL:
+ * @AGG_TX_STATE_SCD_QUERY:
+ * @AGG_TX_STATE_TEST_BAD_CRC32:
+ * @AGG_TX_STATE_RESPONSE:
+ * @AGG_TX_STATE_DUMP_TX:
+ * @AGG_TX_STATE_DELAY_TX:
+ * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ *     occur if tx failed for this frame when it was a member of a previous
+ *     aggregation block). If rate scaling is used, retry count indicates the
+ *     rate table entry used for all frames in the new agg.
+ * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ *     this frame
+ *
+ * TODO: complete documentation
+ */
+enum iwl_tx_agg_status {
+       AGG_TX_STATE_STATUS_MSK = 0x00fff,
+       AGG_TX_STATE_TRANSMITTED = 0x000,
+       AGG_TX_STATE_UNDERRUN = 0x001,
+       AGG_TX_STATE_BT_PRIO = 0x002,
+       AGG_TX_STATE_FEW_BYTES = 0x004,
+       AGG_TX_STATE_ABORT = 0x008,
+       AGG_TX_STATE_LAST_SENT_TTL = 0x010,
+       AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+       AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+       AGG_TX_STATE_SCD_QUERY = 0x080,
+       AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+       AGG_TX_STATE_RESPONSE = 0x1ff,
+       AGG_TX_STATE_DUMP_TX = 0x200,
+       AGG_TX_STATE_DELAY_TX = 0x400,
+       AGG_TX_STATE_TRY_CNT_POS = 12,
+       AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,    /* retry count for 1st frame in agg */
+};
+
+#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL| \
+                                    AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+                                    AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've
+ * written the bytes to the TXE, but we know nothing about what the DSP did.
+ */
+#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
+                                   AGG_TX_STATE_ABORT | \
+                                   AGG_TX_STATE_SCD_QUERY)
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1)  No aggregation (frame_count == 1).  This reports Tx results for a single
+ *     frame. Multiple attempts, at various bit rates, may have been made for
+ *     this frame.
+ *
+ * 2)  Aggregation (frame_count > 1).  This reports Tx results for two or more
+ *     frames that used block-acknowledge.  All frames were transmitted at
+ *     same rate. Rate scaling may have been used if first frame in this new
+ *     agg block failed in previous agg block(s).
+ *
+ *     Note that, for aggregation, ACK (block-ack) status is not delivered
+ *     here; block-ack has not been received by the time the device records
+ *     this status.
+ *     This status relates to reasons the tx might have been blocked or aborted
+ *     within the device, rather than whether it was received successfully by
+ *     the destination station.
+ */
+
+/**
+ * struct agg_tx_status - per packet TX aggregation status
+ * @status: enum iwl_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct agg_tx_status {
+       __le16 status;
+       __le16 sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+
+/**
+ * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ *     Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ *     for agg: RTS + CTS + aggregation tx time + block-ack time.
+ *     in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg:  frame status TX_STATUS_*
+ *     for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
+ *     follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp {
+       u8 frame_count;
+       u8 bt_kill_count;
+       u8 failure_rts;
+       u8 failure_frame;
+       __le32 initial_rate;
+       __le16 wireless_media_time;
+
+       u8 pa_status;
+       u8 pa_integ_res_a[3];
+       u8 pa_integ_res_b[3];
+       u8 pa_integ_res_c[3];
+       __le16 measurement_req_id;
+       __le16 reserved;
+
+       __le32 tfd_info;
+       __le16 seq_ctl;
+       __le16 byte_cnt;
+       u8 tlc_info;
+       u8 ra_tid;
+       __le16 frame_ctrl;
+
+       struct agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwl_mvm_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @sta_addr_lo32: lower 32 bits of the MAC address
+ * @sta_addr_hi16: upper 16 bits of the MAC address
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl:
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ */
+struct iwl_mvm_ba_notif {
+       __le32 sta_addr_lo32;
+       __le16 sta_addr_hi16;
+       __le16 reserved;
+
+       u8 sta_id;
+       u8 tid;
+       __le16 seq_ctl;
+       __le64 bitmap;
+       __le16 scd_flow;
+       __le16 scd_ssn;
+       u8 txed;
+       u8 txed_2_done;
+       __le16 reserved1;
+} __packed;
+
+/*
+ * struct iwl_mac_beacon_cmd - beacon template command
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ *  mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd {
+       struct iwl_tx_cmd tx;
+       __le32 template_id;
+       __le32 tim_idx;
+       __le32 tim_size;
+       struct ieee80211_hdr frame[0];
+} __packed;
+
+/**
+ * enum iwl_dump_control - dump (flush) control flags
+ * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ *     and the TFD queues are empty.
+ */
+enum iwl_dump_control {
+       DUMP_TX_FIFO_FLUSH      = BIT(1),
+};
+
+/**
+ * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+       __le32 queues_ctl;
+       __le16 flush_ctl;
+       __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
+{
+       return le32_to_cpup((__le32 *)&tx_resp->status +
+                           tx_resp->frame_count) & 0xfff;
+}
+
+#endif /* __fw_api_tx_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
new file mode 100644 (file)
index 0000000..23eebda
--- /dev/null
@@ -0,0 +1,952 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_h__
+#define __fw_api_h__
+
+#include "fw-api-rs.h"
+#include "fw-api-tx.h"
+#include "fw-api-sta.h"
+#include "fw-api-mac.h"
+#include "fw-api-power.h"
+#include "fw-api-d3.h"
+
+/* queue and FIFO numbers by usage */
+enum {
+       IWL_MVM_OFFCHANNEL_QUEUE = 8,
+       IWL_MVM_CMD_QUEUE = 9,
+       IWL_MVM_AUX_QUEUE = 15,
+       IWL_MVM_FIRST_AGG_QUEUE = 16,
+       IWL_MVM_NUM_QUEUES = 20,
+       IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
+       IWL_MVM_CMD_FIFO = 7
+};
+
+#define IWL_MVM_STATION_COUNT  16
+
+/* commands */
+enum {
+       MVM_ALIVE = 0x1,
+       REPLY_ERROR = 0x2,
+
+       INIT_COMPLETE_NOTIF = 0x4,
+
+       /* PHY context commands */
+       PHY_CONTEXT_CMD = 0x8,
+       DBG_CFG = 0x9,
+
+       /* station table */
+       ADD_STA = 0x18,
+       REMOVE_STA = 0x19,
+
+       /* TX */
+       TX_CMD = 0x1c,
+       TXPATH_FLUSH = 0x1e,
+       MGMT_MCAST_KEY = 0x1f,
+
+       /* global key */
+       WEP_KEY = 0x20,
+
+       /* MAC and Binding commands */
+       MAC_CONTEXT_CMD = 0x28,
+       TIME_EVENT_CMD = 0x29, /* both CMD and response */
+       TIME_EVENT_NOTIFICATION = 0x2a,
+       BINDING_CONTEXT_CMD = 0x2b,
+       TIME_QUOTA_CMD = 0x2c,
+
+       LQ_CMD = 0x4e,
+
+       /* Calibration */
+       TEMPERATURE_NOTIFICATION = 0x62,
+       CALIBRATION_CFG_CMD = 0x65,
+       CALIBRATION_RES_NOTIFICATION = 0x66,
+       CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
+       RADIO_VERSION_NOTIFICATION = 0x68,
+
+       /* Scan offload */
+       SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+       SCAN_OFFLOAD_ABORT_CMD = 0x52,
+       SCAN_OFFLOAD_COMPLETE = 0x6D,
+       SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+       SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+
+       /* Phy */
+       PHY_CONFIGURATION_CMD = 0x6a,
+       CALIB_RES_NOTIF_PHY_DB = 0x6b,
+       /* PHY_DB_CMD = 0x6c, */
+
+       /* Power */
+       POWER_TABLE_CMD = 0x77,
+
+       /* Scanning */
+       SCAN_REQUEST_CMD = 0x80,
+       SCAN_ABORT_CMD = 0x81,
+       SCAN_START_NOTIFICATION = 0x82,
+       SCAN_RESULTS_NOTIFICATION = 0x83,
+       SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+       /* NVM */
+       NVM_ACCESS_CMD = 0x88,
+
+       SET_CALIB_DEFAULT_CMD = 0x8e,
+
+       BEACON_TEMPLATE_CMD = 0x91,
+       TX_ANT_CONFIGURATION_CMD = 0x98,
+       STATISTICS_NOTIFICATION = 0x9d,
+
+       /* RF-KILL commands and notifications */
+       CARD_STATE_CMD = 0xa0,
+       CARD_STATE_NOTIFICATION = 0xa1,
+
+       REPLY_RX_PHY_CMD = 0xc0,
+       REPLY_RX_MPDU_CMD = 0xc1,
+       BA_NOTIF = 0xc5,
+
+       REPLY_DEBUG_CMD = 0xf0,
+       DEBUG_LOG_MSG = 0xf7,
+
+       /* D3 commands/notifications */
+       D3_CONFIG_CMD = 0xd3,
+       PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+       OFFLOADS_QUERY_CMD = 0xd5,
+       REMOTE_WAKE_CONFIG_CMD = 0xd6,
+
+       /* for WoWLAN in particular */
+       WOWLAN_PATTERNS = 0xe0,
+       WOWLAN_CONFIGURATION = 0xe1,
+       WOWLAN_TSC_RSC_PARAM = 0xe2,
+       WOWLAN_TKIP_PARAM = 0xe3,
+       WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+       WOWLAN_GET_STATUSES = 0xe5,
+       WOWLAN_TX_POWER_PER_DB = 0xe6,
+
+       /* and for NetDetect */
+       NET_DETECT_CONFIG_CMD = 0x54,
+       NET_DETECT_PROFILES_QUERY_CMD = 0x56,
+       NET_DETECT_PROFILES_CMD = 0x57,
+       NET_DETECT_HOTSPOTS_CMD = 0x58,
+       NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
+
+       REPLY_MAX = 0xff,
+};
+
+/**
+ * struct iwl_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwl_cmd_response {
+       __le32 status;
+};
+
+/*
+ * struct iwl_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwl_tx_ant_cfg_cmd {
+       __le32 valid;
+} __packed;
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ *             flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ *             event triggers.
+ */
+struct iwl_calib_ctrl {
+       __le32 flow_trigger;
+       __le32 event_trigger;
+} __packed;
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
+ */
+enum iwl_calib_cfg {
+       IWL_CALIB_CFG_XTAL_IDX                  = BIT(0),
+       IWL_CALIB_CFG_TEMPERATURE_IDX           = BIT(1),
+       IWL_CALIB_CFG_VOLTAGE_READ_IDX          = BIT(2),
+       IWL_CALIB_CFG_PAPD_IDX                  = BIT(3),
+       IWL_CALIB_CFG_TX_PWR_IDX                = BIT(4),
+       IWL_CALIB_CFG_DC_IDX                    = BIT(5),
+       IWL_CALIB_CFG_BB_FILTER_IDX             = BIT(6),
+       IWL_CALIB_CFG_LO_LEAKAGE_IDX            = BIT(7),
+       IWL_CALIB_CFG_TX_IQ_IDX                 = BIT(8),
+       IWL_CALIB_CFG_TX_IQ_SKEW_IDX            = BIT(9),
+       IWL_CALIB_CFG_RX_IQ_IDX                 = BIT(10),
+       IWL_CALIB_CFG_RX_IQ_SKEW_IDX            = BIT(11),
+       IWL_CALIB_CFG_SENSITIVITY_IDX           = BIT(12),
+       IWL_CALIB_CFG_CHAIN_NOISE_IDX           = BIT(13),
+       IWL_CALIB_CFG_DISCONNECTED_ANT_IDX      = BIT(14),
+       IWL_CALIB_CFG_ANT_COUPLING_IDX          = BIT(15),
+       IWL_CALIB_CFG_DAC_IDX                   = BIT(16),
+       IWL_CALIB_CFG_ABS_IDX                   = BIT(17),
+       IWL_CALIB_CFG_AGC_IDX                   = BIT(18),
+};
+
+/*
+ * Phy configuration command.
+ */
+struct iwl_phy_cfg_cmd {
+       __le32  phy_cfg;
+       struct iwl_calib_ctrl calib_control;
+} __packed;
+
+#define PHY_CFG_RADIO_TYPE     (BIT(0) | BIT(1))
+#define PHY_CFG_RADIO_STEP     (BIT(2) | BIT(3))
+#define PHY_CFG_RADIO_DASH     (BIT(4) | BIT(5))
+#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
+#define PHY_CFG_TX_CHAIN_A     BIT(8)
+#define PHY_CFG_TX_CHAIN_B     BIT(9)
+#define PHY_CFG_TX_CHAIN_C     BIT(10)
+#define PHY_CFG_RX_CHAIN_A     BIT(12)
+#define PHY_CFG_RX_CHAIN_B     BIT(13)
+#define PHY_CFG_RX_CHAIN_C     BIT(14)
+
+
+/* Target of the NVM_ACCESS_CMD */
+enum {
+       NVM_ACCESS_TARGET_CACHE = 0,
+       NVM_ACCESS_TARGET_OTP = 1,
+       NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/**
+ * struct iwl_nvm_access_cmd_ver1 - Request the device to send the NVM.
+ * @op_code: 0 - read, 1 - write.
+ * @target: NVM_ACCESS_TARGET_*. should be 0 for read.
+ * @cache_refresh: 0 - None, 1- NVM.
+ * @offset: offset in the nvm data.
+ * @length: of the chunk.
+ * @data: empty on read, the NVM chunk on write
+ */
+struct iwl_nvm_access_cmd_ver1 {
+       u8 op_code;
+       u8 target;
+       u8 cache_refresh;
+       u8 reserved;
+       __le16 offset;
+       __le16 length;
+       u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_nvm_access_resp_ver1 - response to NVM_ACCESS_CMD
+ * @offset: the offset in the nvm data
+ * @length: of the chunk
+ * @data: the nvm chunk on when NVM_ACCESS_CMD was read, nothing on write
+ */
+struct iwl_nvm_access_resp_ver1 {
+       __le16 offset;
+       __le16 length;
+       u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_1 */
+
+/* Section types for NVM_ACCESS_CMD version 2 */
+enum {
+       NVM_SECTION_TYPE_HW = 0,
+       NVM_SECTION_TYPE_SW,
+       NVM_SECTION_TYPE_PAPD,
+       NVM_SECTION_TYPE_BT,
+       NVM_SECTION_TYPE_CALIBRATION,
+       NVM_SECTION_TYPE_PRODUCTION,
+       NVM_SECTION_TYPE_POST_FCS_CALIB,
+       NVM_NUM_OF_SECTIONS,
+};
+
+/**
+ * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section
+ * @op_code: 0 - read, 1 - write
+ * @target: NVM_ACCESS_TARGET_*
+ * @type: NVM_SECTION_TYPE_*
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: if write operation, the data to write. On read it is empty
+ */
+struct iwl_nvm_access_cmd_ver2 {
+       u8 op_code;
+       u8 target;
+       __le16 type;
+       __le16 offset;
+       __le16 length;
+       u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: NVM_SECTION_TYPE_*
+ * @status: 0 for success, fail otherwise
+ * @data: if read operation, the data returned. Empty on write.
+ */
+struct iwl_nvm_access_resp_ver2 {
+       __le16 offset;
+       __le16 length;
+       __le16 type;
+       __le16 status;
+       u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/* MVM_ALIVE 0x1 */
+
+/* alive response is_valid values */
+#define ALIVE_RESP_UCODE_OK    BIT(0)
+#define ALIVE_RESP_RFKILL      BIT(1)
+
+/* alive response ver_type values */
+enum {
+       FW_TYPE_HW = 0,
+       FW_TYPE_PROT = 1,
+       FW_TYPE_AP = 2,
+       FW_TYPE_WOWLAN = 3,
+       FW_TYPE_TIMING = 4,
+       FW_TYPE_WIPAN = 5
+};
+
+/* alive response ver_subtype values */
+enum {
+       FW_SUBTYPE_FULL_FEATURE = 0,
+       FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+       FW_SUBTYPE_REDUCED = 2,
+       FW_SUBTYPE_ALIVE_ONLY = 3,
+       FW_SUBTYPE_WOWLAN = 4,
+       FW_SUBTYPE_AP_SUBTYPE = 5,
+       FW_SUBTYPE_WIPAN = 6,
+       FW_SUBTYPE_INITIALIZE = 9
+};
+
+#define IWL_ALIVE_STATUS_ERR 0xDEAD
+#define IWL_ALIVE_STATUS_OK 0xCAFE
+
+#define IWL_ALIVE_FLG_RFKILL   BIT(0)
+
+struct mvm_alive_resp {
+       __le16 status;
+       __le16 flags;
+       u8 ucode_minor;
+       u8 ucode_major;
+       __le16 id;
+       u8 api_minor;
+       u8 api_major;
+       u8 ver_subtype;
+       u8 ver_type;
+       u8 mac;
+       u8 opt;
+       __le16 reserved2;
+       __le32 timestamp;
+       __le32 error_event_table_ptr;   /* SRAM address for error log */
+       __le32 log_event_table_ptr;     /* SRAM address for event log */
+       __le32 cpu_register_ptr;
+       __le32 dbgm_config_ptr;
+       __le32 alive_counter_ptr;
+       __le32 scd_base_ptr;            /* SRAM address for SCD */
+} __packed; /* ALIVE_RES_API_S_VER_1 */
+
+/* Error response/notification */
+enum {
+       FW_ERR_UNKNOWN_CMD = 0x0,
+       FW_ERR_INVALID_CMD_PARAM = 0x1,
+       FW_ERR_SERVICE = 0x2,
+       FW_ERR_ARC_MEMORY = 0x3,
+       FW_ERR_ARC_CODE = 0x4,
+       FW_ERR_WATCH_DOG = 0x5,
+       FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+       FW_ERR_WEP_KEY_SIZE = 0x11,
+       FW_ERR_OBSOLETE_FUNC = 0x12,
+       FW_ERR_UNEXPECTED = 0xFE,
+       FW_ERR_FATAL = 0xFF
+};
+
+/**
+ * struct iwl_error_resp - FW error indication
+ * ( REPLY_ERROR = 0x2 )
+ * @error_type: one of FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ *     error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwl_error_resp {
+       __le32 error_type;
+       u8 cmd_id;
+       u8 reserved1;
+       __le16 bad_cmd_seq_num;
+       __le32 error_service;
+       __le64 timestamp;
+} __packed;
+
+
+/* Common PHY, MAC and Bindings definitions */
+
+#define MAX_MACS_IN_BINDING    (3)
+#define MAX_BINDINGS           (4)
+#define AUX_BINDING_INDEX      (3)
+#define MAX_PHYS               (4)
+
+/* Used to extract ID and color from the context dword */
+#define FW_CTXT_ID_POS   (0)
+#define FW_CTXT_ID_MSK   (0xff << FW_CTXT_ID_POS)
+#define FW_CTXT_COLOR_POS (8)
+#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS)
+#define FW_CTXT_INVALID          (0xffffffff)
+
+#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
+                                         (_color << FW_CTXT_COLOR_POS))
+
+/* Possible actions on PHYs, MACs and Bindings */
+enum {
+       FW_CTXT_ACTION_STUB = 0,
+       FW_CTXT_ACTION_ADD,
+       FW_CTXT_ACTION_MODIFY,
+       FW_CTXT_ACTION_REMOVE,
+       FW_CTXT_ACTION_NUM
+}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+/* Time Events */
+
+/* Time Event types, according to MAC type */
+enum iwl_time_event_type {
+       /* BSS Station Events */
+       TE_BSS_STA_AGGRESSIVE_ASSOC,
+       TE_BSS_STA_ASSOC,
+       TE_BSS_EAP_DHCP_PROT,
+       TE_BSS_QUIET_PERIOD,
+
+       /* P2P Device Events */
+       TE_P2P_DEVICE_DISCOVERABLE,
+       TE_P2P_DEVICE_LISTEN,
+       TE_P2P_DEVICE_ACTION_SCAN,
+       TE_P2P_DEVICE_FULL_SCAN,
+
+       /* P2P Client Events */
+       TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+       TE_P2P_CLIENT_ASSOC,
+       TE_P2P_CLIENT_QUIET_PERIOD,
+
+       /* P2P GO Events */
+       TE_P2P_GO_ASSOC_PROT,
+       TE_P2P_GO_REPETITIVE_NOA,
+       TE_P2P_GO_CT_WINDOW,
+
+       /* WiDi Sync Events */
+       TE_WIDI_TX_SYNC,
+
+       TE_MAX
+}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+       TE_INDEPENDENT          = 0,
+       TE_DEP_OTHER            = 1,
+       TE_DEP_TSF              = 2,
+       TE_EVENT_SOCIOPATHIC    = 4,
+}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
+/* When to send Time Event notifications and to whom (internal = FW) */
+enum {
+       TE_NOTIF_NONE = 0,
+       TE_NOTIF_HOST_START = 0x1,
+       TE_NOTIF_HOST_END = 0x2,
+       TE_NOTIF_INTERNAL_START = 0x4,
+       TE_NOTIF_INTERNAL_END = 0x8
+}; /* MAC_EVENT_ACTION_API_E_VER_1 */
+
+/*
+ * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ *  the first fragment is scheduled.
+ * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ *  the first 2 fragments are scheduled.
+ * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
+ *  of fragments are valid.
+ *
+ * Other than the constant defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+       TE_FRAG_NONE = 0,
+       TE_FRAG_SINGLE = 1,
+       TE_FRAG_DUAL = 2,
+       TE_FRAG_ENDLESS = 0xffffffff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define TE_REPEAT_ENDLESS      (0xffffffff)
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_REPEAT_MAX_MSK      (0x0fffffff)
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_FRAG_MAX_MSK                (0x0fffffff)
+
+/**
+ * struct iwl_time_event_cmd - configuring Time Events
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ *     If the action is ADD, then it means the type of event to add.
+ *     For all other actions it is the unique event ID assigned when the
+ *     event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @notify: notifications using TE_NOTIF_* (whom to notify when)
+ */
+struct iwl_time_event_cmd {
+       /* COMMON_INDEX_HDR_API_S_VER_1 */
+       __le32 id_and_color;
+       __le32 action;
+       __le32 id;
+       /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+       __le32 apply_time;
+       __le32 max_delay;
+       __le32 dep_policy;
+       __le32 depends_on;
+       __le32 is_present;
+       __le32 max_frags;
+       __le32 interval;
+       __le32 interval_reciprocal;
+       __le32 duration;
+       __le32 repeat;
+       __le32 notify;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwl_time_event_resp {
+       __le32 status;
+       __le32 id;
+       __le32 unique_id;
+       __le32 id_and_color;
+} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_notif - notifications of time event start/stop
+ * ( TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of TE_NOTIF_START or TE_NOTIF_END
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwl_time_event_notif {
+       __le32 timestamp;
+       __le32 session_id;
+       __le32 unique_id;
+       __le32 id_and_color;
+       __le32 action;
+       __le32 status;
+} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwl_binding_cmd - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ */
+struct iwl_binding_cmd {
+       /* COMMON_INDEX_HDR_API_S_VER_1 */
+       __le32 id_and_color;
+       __le32 action;
+       /* BINDING_DATA_API_S_VER_1 */
+       __le32 macs[MAX_MACS_IN_BINDING];
+       __le32 phy;
+} __packed; /* BINDING_CMD_API_S_VER_1 */
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWL_MVM_MAX_QUOTA 128
+
+/**
+ * struct iwl_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ *     remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwl_time_quota_data {
+       __le32 id_and_color;
+       __le32 quota;
+       __le32 max_duration;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwl_time_quota_cmd {
+       struct iwl_time_quota_data quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context */
+
+/* Supported bands */
+#define PHY_BAND_5  (0)
+#define PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define PHY_VHT_CHANNEL_MODE20 (0x0)
+#define PHY_VHT_CHANNEL_MODE40 (0x1)
+#define PHY_VHT_CHANNEL_MODE80 (0x2)
+#define PHY_VHT_CHANNEL_MODE160        (0x3)
+
+/*
+ * Control channel position:
+ * For legacy set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
+ *   bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
+ *                                   center_freq
+ *                                        |
+ * 40Mhz                          |_______|_______|
+ * 80Mhz                  |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code      011     010     001     000  |  100     101     110    111
+ */
+#define PHY_VHT_CTRL_POS_1_BELOW  (0x0)
+#define PHY_VHT_CTRL_POS_2_BELOW  (0x1)
+#define PHY_VHT_CTRL_POS_3_BELOW  (0x2)
+#define PHY_VHT_CTRL_POS_4_BELOW  (0x3)
+#define PHY_VHT_CTRL_POS_1_ABOVE  (0x4)
+#define PHY_VHT_CTRL_POS_2_ABOVE  (0x5)
+#define PHY_VHT_CTRL_POS_3_ABOVE  (0x6)
+#define PHY_VHT_CTRL_POS_4_ABOVE  (0x7)
+
+/*
+ * @band: PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwl_fw_channel_info {
+       u8 band;
+       u8 channel;
+       u8 width;
+       u8 ctrl_pos;
+} __packed;
+
+#define PHY_RX_CHAIN_DRIVER_FORCE_POS  (0)
+#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+       (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define PHY_RX_CHAIN_VALID_POS         (1)
+#define PHY_RX_CHAIN_VALID_MSK \
+       (0x7 << PHY_RX_CHAIN_VALID_POS)
+#define PHY_RX_CHAIN_FORCE_SEL_POS     (4)
+#define PHY_RX_CHAIN_FORCE_SEL_MSK \
+       (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS        (7)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+       (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define PHY_RX_CHAIN_CNT_POS           (10)
+#define PHY_RX_CHAIN_CNT_MSK \
+       (0x3 << PHY_RX_CHAIN_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_CNT_POS      (12)
+#define PHY_RX_CHAIN_MIMO_CNT_MSK \
+       (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_FORCE_POS    (14)
+#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
+       (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define NUM_PHY_CTX    3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ *     other value means apply new params after X usecs
+ * @tx_param_color: ???
+ * @channel_info:
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd {
+       /* COMMON_INDEX_HDR_API_S_VER_1 */
+       __le32 id_and_color;
+       __le32 action;
+       /* PHY_CONTEXT_DATA_API_S_VER_1 */
+       __le32 apply_time;
+       __le32 tx_param_color;
+       struct iwl_fw_channel_info ci;
+       __le32 txchain_info;
+       __le32 rxchain_info;
+       __le32 acquisition_data;
+       __le32 dsp_cfg_flags;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+
+/*
+ * Indices into struct iwl_rx_phy_info's non_cfg_phy[] array, and masks
+ * used to extract AGC and RSSI values from the words at those indices.
+ */
+#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_AGC_IDX 1
+#define IWL_RX_INFO_RSSI_AB_IDX 2
+#define IWL_RX_INFO_RSSI_C_IDX 3
+#define IWL_OFDM_AGC_DB_MSK 0xfe00
+#define IWL_OFDM_AGC_DB_POS 9
+#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWL_OFDM_RSSI_A_POS 0
+#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWL_OFDM_RSSI_B_POS 16
+#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
+#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
+#define IWL_OFDM_RSSI_C_POS 0
+
+/**
+ * struct iwl_rx_phy_info - phy info
+ * (REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1: reserved
+ * @system_timestamp: GP2  at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy: for various implementations of non_cfg_phy
+ * @rate_n_flags: RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @reserved2: reserved
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ *     calculation
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwl_rx_phy_info {
+       u8 non_cfg_phy_cnt;
+       u8 cfg_phy_cnt;
+       u8 stat_id;
+       u8 reserved1;
+       __le32 system_timestamp;
+       __le64 timestamp;
+       __le32 beacon_time_stamp;
+       __le16 phy_flags;
+       __le16 channel;
+       __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+       __le32 rate_n_flags;
+       __le32 byte_count;
+       __le16 reserved2;
+       __le16 frame_time;
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy data preceding each Rx MPDU
+ * @byte_count: byte count of the MPDU
+ * @reserved: reserved
+ */
+struct iwl_rx_mpdu_res_start {
+       __le16 byte_count;
+       __le16 reserved;
+} __packed;
+
+/**
+ * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
+ * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK
+ * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band reception
+ * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwl_rx_phy_flags {
+       RX_RES_PHY_FLAGS_BAND_24        = BIT(0),
+       RX_RES_PHY_FLAGS_MOD_CCK        = BIT(1),
+       RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
+       RX_RES_PHY_FLAGS_NARROW_BAND    = BIT(3),
+       RX_RES_PHY_FLAGS_ANTENNA        = (0x7 << 4),
+       /* not a flag bit: shift amount for the 3-bit ANTENNA field above */
+       RX_RES_PHY_FLAGS_ANTENNA_POS    = 4,
+       RX_RES_PHY_FLAGS_AGG            = BIT(7),
+       RX_RES_PHY_FLAGS_OFDM_HT        = BIT(8),
+       RX_RES_PHY_FLAGS_OFDM_GF        = BIT(9),
+       RX_RES_PHY_FLAGS_OFDM_VHT       = BIT(10),
+};
+
+/**
+ * enum iwl_mvm_rx_status - written by fw for each Rx packet
+ * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @RX_MPDU_RES_STATUS_KEY_VALID:
+ * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ *     in the driver.
+ * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR:  valid for alg = CCM_CMAC or
+ *     alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ *     %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @RX_MPDU_RES_STATUS_RRF_KILL:
+ * @RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+enum iwl_mvm_rx_status {
+       RX_MPDU_RES_STATUS_CRC_OK                       = BIT(0),
+       RX_MPDU_RES_STATUS_OVERRUN_OK                   = BIT(1),
+       RX_MPDU_RES_STATUS_SRC_STA_FOUND                = BIT(2),
+       RX_MPDU_RES_STATUS_KEY_VALID                    = BIT(3),
+       RX_MPDU_RES_STATUS_KEY_PARAM_OK                 = BIT(4),
+       RX_MPDU_RES_STATUS_ICV_OK                       = BIT(5),
+       RX_MPDU_RES_STATUS_MIC_OK                       = BIT(6),
+       RX_MPDU_RES_STATUS_TTAK_OK                      = BIT(7),
+       /*
+        * NOTE(review): intentionally shares BIT(7) with TTAK_OK; the
+        * meaning presumably depends on the cipher in use (see the
+        * kernel-doc above) — confirm against the firmware API.
+        */
+       RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR         = BIT(7),
+       RX_MPDU_RES_STATUS_SEC_NO_ENC                   = (0 << 8),
+       RX_MPDU_RES_STATUS_SEC_WEP_ENC                  = (1 << 8),
+       RX_MPDU_RES_STATUS_SEC_CCM_ENC                  = (2 << 8),
+       RX_MPDU_RES_STATUS_SEC_TKIP_ENC                 = (3 << 8),
+       RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC             = (6 << 8),
+       RX_MPDU_RES_STATUS_SEC_ENC_ERR                  = (7 << 8),
+       RX_MPDU_RES_STATUS_SEC_ENC_MSK                  = (7 << 8),
+       RX_MPDU_RES_STATUS_DEC_DONE                     = BIT(11),
+       RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP        = BIT(12),
+       RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP               = BIT(13),
+       RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT               = BIT(14),
+       RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME             = BIT(15),
+       RX_MPDU_RES_STATUS_HASH_INDEX_MSK               = (0x3F0000),
+       RX_MPDU_RES_STATUS_STA_ID_MSK                   = (0x1f000000),
+       RX_MPDU_RES_STATUS_RRF_KILL                     = BIT(29),
+       RX_MPDU_RES_STATUS_FILTERING_MSK                = (0xc00000),
+       RX_MPDU_RES_STATUS2_FILTERING_MSK               = (0xc0000000),
+};
+
+/**
+ * struct iwl_radio_version_notif - information on the radio version
+ * ( RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor: radio flavor
+ * @radio_step: radio version step
+ * @radio_dash: radio version dash
+ */
+struct iwl_radio_version_notif {
+       __le32 radio_flavor;
+       __le32 radio_step;
+       __le32 radio_dash;
+} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+/* RF-kill / card-state bits reported in struct iwl_card_state_notif */
+enum iwl_card_state_flags {
+       CARD_ENABLED            = 0x00,
+       HW_CARD_DISABLED        = 0x01,
+       SW_CARD_DISABLED        = 0x02,
+       CT_KILL_CARD_DISABLED   = 0x04,
+       HALT_CARD_DISABLED      = 0x08,
+       CARD_DISABLED_MSK       = 0x0f,
+       CARD_IS_RX_ON           = 0x10,
+};
+
+/**
+ * struct iwl_card_state_notif - card (RF kill) state notification
+ * ( CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwl_card_state_flags
+ */
+struct iwl_card_state_notif {
+       __le32 flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_set_calib_default_cmd - set default value for calibration.
+ * ( SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: of data
+ * @data: the value to set for the calibration result
+ */
+struct iwl_set_calib_default_cmd {
+       __le16 calib_index;
+       __le16 length;
+       u8 data[0];     /* variable-length payload, @length bytes */
+} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
+
+#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
new file mode 100644 (file)
index 0000000..d3d959d
--- /dev/null
@@ -0,0 +1,640 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-fw.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-eeprom-parse.h"
+
+#include "mvm.h"
+#include "iwl-phy-db.h"
+
+/* How long to wait for the ALIVE / calibration-complete notifications */
+#define MVM_UCODE_ALIVE_TIMEOUT        HZ
+#define MVM_UCODE_CALIB_TIMEOUT        (2*HZ)
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+
+/* Default calibration values for WkP - set to INIT image w/o running */
+static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
+                                                0x00, 0x18, 0x00 };
+static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+                                            0x7f, 0x7f, 0x7f };
+static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
+                                            0x00 };
+static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
+static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
+
+/* One default-value blob per calibration index */
+struct iwl_calib_default_data {
+       u16 size;       /* number of bytes in @data */
+       void *data;     /* default calibration result bytes */
+};
+
+#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
+
+/*
+ * Indexed by calibration index; entries left zero-sized are skipped by
+ * iwl_set_default_calibrations().
+ */
+static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
+       [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
+       [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
+       [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
+       [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
+       [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
+       [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
+       [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
+};
+
+/* Result of waiting for the ALIVE notification (filled by iwl_alive_fn) */
+struct iwl_mvm_alive_data {
+       bool valid;             /* fw reported IWL_ALIVE_STATUS_OK */
+       u32 scd_base_addr;      /* scheduler base address from the fw */
+};
+
+/* Return the firmware image of the given type, or NULL if out of range */
+static inline const struct fw_img *
+iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
+{
+       if (ucode_type >= IWL_UCODE_TYPE_MAX)
+               return NULL;
+
+       return &mvm->fw->img[ucode_type];
+}
+
+/* Tell the firmware which TX antennas are valid (synchronous command) */
+static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
+{
+       struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
+               .valid = cpu_to_le32(valid_tx_ant),
+       };
+
+       IWL_DEBUG_HC(mvm, "select valid tx ant: %u\n", valid_tx_ant);
+       return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+                                   sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+/*
+ * Notification-wait callback for the ALIVE message: record the error/log
+ * event table pointers and scheduler base address, and flag whether the
+ * firmware reported a successful boot. Returning true ends the wait.
+ */
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+                        struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_mvm *mvm =
+               container_of(notif_wait, struct iwl_mvm, notif_wait);
+       struct iwl_mvm_alive_data *alive_data = data;
+       struct mvm_alive_resp *palive;
+
+       palive = (void *)pkt->data;
+
+       mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
+       mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+       alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+       alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
+       IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+                    le16_to_cpu(palive->status), palive->ver_type,
+                    palive->ver_subtype);
+
+       return true;
+}
+
+/*
+ * Notification-wait callback used while running the INIT image: store each
+ * CALIB_RES_NOTIF_PHY_DB result in the phy db and keep waiting (return
+ * false); stop waiting (return true) on INIT_COMPLETE_NOTIF.
+ */
+static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
+                                 struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_phy_db *phy_db = data;
+
+       if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+               WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+               return true;
+       }
+
+       WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
+
+       return false;
+}
+
+/*
+ * Load the requested firmware image and block until the ALIVE notification
+ * arrives (or MVM_UCODE_ALIVE_TIMEOUT elapses). On success, configures the
+ * transport with the scheduler base address and resets the queue mappings.
+ * On any failure, mvm->cur_ucode is restored to the previously loaded type.
+ */
+static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+                                        enum iwl_ucode_type ucode_type)
+{
+       struct iwl_notification_wait alive_wait;
+       struct iwl_mvm_alive_data alive_data;
+       const struct fw_img *fw;
+       int ret, i;
+       enum iwl_ucode_type old_type = mvm->cur_ucode;
+       static const u8 alive_cmd[] = { MVM_ALIVE };
+
+       /* set cur_ucode before start_fw so notifications are parsed right */
+       mvm->cur_ucode = ucode_type;
+       fw = iwl_get_ucode_image(mvm, ucode_type);
+
+       mvm->ucode_loaded = false;
+
+       if (!fw)
+               return -EINVAL;
+
+       iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
+                                  alive_cmd, ARRAY_SIZE(alive_cmd),
+                                  iwl_alive_fn, &alive_data);
+
+       ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
+       if (ret) {
+               mvm->cur_ucode = old_type;
+               iwl_remove_notification(&mvm->notif_wait, &alive_wait);
+               return ret;
+       }
+
+       /*
+        * Some things may run in the background now, but we
+        * just wait for the ALIVE notification here.
+        */
+       ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
+                                   MVM_UCODE_ALIVE_TIMEOUT);
+       if (ret) {
+               mvm->cur_ucode = old_type;
+               return ret;
+       }
+
+       if (!alive_data.valid) {
+               IWL_ERR(mvm, "Loaded ucode is not valid!\n");
+               mvm->cur_ucode = old_type;
+               return -EIO;
+       }
+
+       iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
+       /*
+        * Note: all the queues are enabled as part of the interface
+        * initialization, but in firmware restart scenarios they
+        * could be stopped, so wake them up. In firmware restart,
+        * mac80211 will have the queues stopped as well until the
+        * reconfiguration completes. During normal startup, they
+        * will be empty.
+        */
+
+       for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+               if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+                       mvm->queue_to_mac80211[i] = i;
+               else
+                       mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+               atomic_set(&mvm->queue_stop_count[i], 0);
+       }
+
+       mvm->transport_queue_stop = 0;
+
+       mvm->ucode_loaded = true;
+
+       return 0;
+}
+#define IWL_HW_REV_ID_RAINBOW  0x2
+#define IWL_PROJ_TYPE_LHP      0x5
+
+/*
+ * Build the phy_cfg word for PHY_CONFIGURATION_CMD from NVM data:
+ * radio dash/step, hw rev, project type and the valid TX/RX antenna masks.
+ */
+static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
+{
+       struct iwl_nvm_data *data = mvm->nvm_data;
+       /* Temp calls to static definitions, will be changed to CSR calls */
+       u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
+       u8 project_type = IWL_PROJ_TYPE_LHP;
+
+       return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
+               (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
+               (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
+}
+
+/*
+ * Send PHY_CONFIGURATION_CMD with the phy config word and the calibration
+ * triggers appropriate for the currently loaded ucode image.
+ */
+static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+{
+       struct iwl_phy_cfg_cmd phy_cfg_cmd;
+       enum iwl_ucode_type ucode_type = mvm->cur_ucode;
+
+       /* Set parameters */
+       phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
+       phy_cfg_cmd.calib_control.event_trigger =
+               mvm->fw->default_calib[ucode_type].event_trigger;
+       phy_cfg_cmd.calib_control.flow_trigger =
+               mvm->fw->default_calib[ucode_type].flow_trigger;
+
+       /* NOTE(review): phy_cfg is __le32 printed without le32_to_cpu -
+        * debug output is byte-swapped on big-endian; confirm/fix. */
+       IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
+                      phy_cfg_cmd.phy_cfg);
+
+       return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+                                   sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+/* Starting with the new PHY DB implementation - New calibs are enabled */
+/* Value - 0x405e7 */
+#define IWL_CALIB_DEFAULT_FLOW_INIT    (IWL_CALIB_CFG_XTAL_IDX         |\
+                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
+                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+                                        IWL_CALIB_CFG_DC_IDX           |\
+                                        IWL_CALIB_CFG_BB_FILTER_IDX    |\
+                                        IWL_CALIB_CFG_LO_LEAKAGE_IDX   |\
+                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
+                                        IWL_CALIB_CFG_RX_IQ_IDX        |\
+                                        IWL_CALIB_CFG_AGC_IDX)
+
+/* INIT image runs no event-driven calibrations */
+#define IWL_CALIB_DEFAULT_EVENT_INIT   0x0
+
+/* Value 0x41567 */
+#define IWL_CALIB_DEFAULT_FLOW_RUN     (IWL_CALIB_CFG_XTAL_IDX         |\
+                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
+                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+                                        IWL_CALIB_CFG_BB_FILTER_IDX    |\
+                                        IWL_CALIB_CFG_DC_IDX           |\
+                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
+                                        IWL_CALIB_CFG_RX_IQ_IDX        |\
+                                        IWL_CALIB_CFG_SENSITIVITY_IDX  |\
+                                        IWL_CALIB_CFG_AGC_IDX)
+
+#define IWL_CALIB_DEFAULT_EVENT_RUN    (IWL_CALIB_CFG_XTAL_IDX         |\
+                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
+                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+                                        IWL_CALIB_CFG_TX_PWR_IDX       |\
+                                        IWL_CALIB_CFG_DC_IDX           |\
+                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
+                                        IWL_CALIB_CFG_SENSITIVITY_IDX)
+
+/*
+ * Sets the calibrations trigger values that will be sent to the FW for runtime
+ * and init calibrations.
+ * The ones given in the FW TLV are not correct.
+ *
+ * NOTE(review): writes through a cast into mvm->fw->default_calib, which is
+ * presumably const-qualified fw data - confirm this mutation is intended.
+ * The IWL_ERR calls print __le32 values without le32_to_cpu (byte-swapped
+ * output on big-endian hosts).
+ */
+static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
+{
+       struct iwl_tlv_calib_ctrl default_calib;
+
+       /*
+        * WkP FW TLV calib bits are wrong, overwrite them.
+        * This defines the dynamic calibrations which are implemented in the
+        * uCode both for init(flow) calculation and event driven calibs.
+        */
+
+       /* Init Image */
+       default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
+       default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
+
+       if (default_calib.event_trigger !=
+           mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
+               IWL_ERR(mvm,
+                       "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
+                       mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
+                       default_calib.event_trigger);
+       if (default_calib.flow_trigger !=
+           mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
+               IWL_ERR(mvm,
+                       "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
+                       mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
+                       default_calib.flow_trigger);
+
+       memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
+              &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
+       IWL_ERR(mvm,
+               "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
+               default_calib.event_trigger,
+               default_calib.flow_trigger);
+
+       /* Run time image */
+       default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
+       default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
+
+       if (default_calib.event_trigger !=
+           mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
+               IWL_ERR(mvm,
+                       "Updating the event calib for RT image: 0x%x -> 0x%x\n",
+                       mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
+                       default_calib.event_trigger);
+       if (default_calib.flow_trigger !=
+           mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
+               IWL_ERR(mvm,
+                       "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
+                       mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
+                       default_calib.flow_trigger);
+
+       memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
+              &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
+       IWL_ERR(mvm,
+               "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
+               default_calib.event_trigger,
+               default_calib.flow_trigger);
+}
+
+/*
+ * Send SET_CALIB_DEFAULT_CMD for every calibration we have a default blob
+ * for in wkp_calib_default_data[], so the fw has values for calibrations
+ * that are not actually run on WkP. Returns 0 or the first send error.
+ */
+static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
+{
+       u8 cmd_raw[16]; /* holds the variable size commands */
+       struct iwl_set_calib_default_cmd *cmd =
+               (struct iwl_set_calib_default_cmd *)cmd_raw;
+       int ret, i;
+
+       /* Setting default values for calibrations we don't run */
+       for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
+               u16 cmd_len;
+
+               if (wkp_calib_default_data[i].size == 0)
+                       continue;
+
+               memset(cmd_raw, 0, sizeof(cmd_raw));
+               /*
+                * Fix: size of the command header, not of the pointer
+                * (sizeof(cmd) was the pointer size, overstating cmd_len).
+                */
+               cmd_len = wkp_calib_default_data[i].size + sizeof(*cmd);
+               /* check the bound before touching the buffer at all */
+               if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
+                             "Need to enlarge cmd_raw to %d\n", cmd_len))
+                       break;
+               cmd->calib_index = cpu_to_le16(i);
+               cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
+               memcpy(cmd->data, wkp_calib_default_data[i].data,
+                      wkp_calib_default_data[i].size);
+               ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
+                                          cmd_len, cmd);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Run the INIT ucode image once: load it, optionally read the NVM, override
+ * the TLV calibration triggers, send the phy config and wait for the
+ * calibration-complete notification. The device is stopped again on exit
+ * unless init_dbg debugging is requested. Caller must hold mvm->mutex.
+ */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+       struct iwl_notification_wait calib_wait;
+       static const u8 init_complete[] = {
+               INIT_COMPLETE_NOTIF,
+               CALIB_RES_NOTIF_PHY_DB
+       };
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (mvm->init_ucode_run)
+               return 0;
+
+       iwl_init_notification_wait(&mvm->notif_wait,
+                                  &calib_wait,
+                                  init_complete,
+                                  ARRAY_SIZE(init_complete),
+                                  iwl_wait_phy_db_entry,
+                                  mvm->phy_db);
+
+       /* Will also start the device */
+       ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
+               goto error;
+       }
+
+       if (read_nvm) {
+               /* Read nvm */
+               ret = iwl_nvm_init(mvm);
+               if (ret) {
+                       IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+                       goto error;
+               }
+       }
+
+       ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+       WARN_ON(ret);
+
+       /* Override the calibrations from TLV and the const of fw */
+       iwl_set_default_calib_trigger(mvm);
+
+       /* WkP doesn't have all calibrations, need to set default values */
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               ret = iwl_set_default_calibrations(mvm);
+               if (ret)
+                       goto error;
+       }
+
+       /*
+        * Send phy configurations command to init uCode
+        * to start the 16.0 uCode init image internal calibrations.
+        */
+       ret = iwl_send_phy_cfg_cmd(mvm);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+                       ret);
+               goto error;
+       }
+
+       /*
+        * Some things may run in the background now, but we
+        * just wait for the calibration complete notification.
+        */
+       ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+                       MVM_UCODE_CALIB_TIMEOUT);
+       if (!ret)
+               mvm->init_ucode_run = true;
+       goto out;
+
+error:
+       iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+out:
+       if (!iwlmvm_mod_params.init_dbg) {
+               iwl_trans_stop_device(mvm->trans);
+       } else if (!mvm->nvm_data) {
+               /* we want to debug INIT and we have no NVM - fake */
+               mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
+                                       sizeof(struct ieee80211_channel) +
+                                       sizeof(struct ieee80211_rate),
+                                       GFP_KERNEL);
+               if (!mvm->nvm_data)
+                       return -ENOMEM;
+               mvm->nvm_data->valid_rx_ant = 1;
+               mvm->nvm_data->valid_tx_ant = 1;
+               mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
+               mvm->nvm_data->bands[0].n_channels = 1;
+               mvm->nvm_data->bands[0].n_bitrates = 1;
+               /*
+                * NOTE(review): void-pointer arithmetic adds ONE BYTE past
+                * channels[], leaving bitrates misaligned; presumably
+                * "(void *)(mvm->nvm_data->channels + 1)" was intended -
+                * confirm (debug-only path).
+                */
+               mvm->nvm_data->bands[0].bitrates =
+                       (void *)mvm->nvm_data->channels + 1;
+               mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+       }
+
+       return ret;
+}
+
+/* NOTE(review): unused here and duplicates MVM_UCODE_CALIB_TIMEOUT above */
+#define UCODE_CALIB_TIMEOUT    (2*HZ)
+
+/*
+ * Bring the device fully up with the runtime (REGULAR) image: run INIT
+ * if it hasn't run yet (e.g. we were in RFKILL at module load), then load
+ * the RT image and send antenna/phy-db/phy-cfg configuration, reset the
+ * fw<->mac80211 station mapping and add the aux station for scanning.
+ * Caller must hold mvm->mutex. Stops the device on error.
+ */
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+       int ret, i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_trans_start_hw(mvm->trans);
+       if (ret)
+               return ret;
+
+       /* If we were in RFKILL during module loading, load init ucode now */
+       if (!mvm->init_ucode_run) {
+               ret = iwl_run_init_mvm_ucode(mvm, false);
+               if (ret && !iwlmvm_mod_params.init_dbg) {
+                       IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+                       goto error;
+               }
+       }
+
+       if (iwlmvm_mod_params.init_dbg)
+               return 0;
+
+       ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+               goto error;
+       }
+
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+       if (ret)
+               goto error;
+
+       /* Send phy db control command and then phy db calibration*/
+       ret = iwl_send_phy_db_data(mvm->phy_db);
+       if (ret)
+               goto error;
+
+       ret = iwl_send_phy_cfg_cmd(mvm);
+       if (ret)
+               goto error;
+
+       /* init the fw <-> mac80211 STA mapping */
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+               RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+       /* Add auxiliary station for scanning */
+       ret = iwl_mvm_add_aux_sta(mvm);
+       if (ret)
+               goto error;
+
+       IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+
+       return 0;
+ error:
+       iwl_trans_stop_device(mvm->trans);
+       return ret;
+}
+
+/*
+ * Load the WoWLAN (D3) image for suspend: same bring-up sequence as
+ * iwl_mvm_up() but with IWL_UCODE_WOWLAN and without the INIT step.
+ * Caller must hold mvm->mutex. Stops the device on error.
+ */
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
+{
+       int ret, i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_trans_start_hw(mvm->trans);
+       if (ret)
+               return ret;
+
+       ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
+               goto error;
+       }
+
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+       if (ret)
+               goto error;
+
+       /* Send phy db control command and then phy db calibration*/
+       ret = iwl_send_phy_db_data(mvm->phy_db);
+       if (ret)
+               goto error;
+
+       ret = iwl_send_phy_cfg_cmd(mvm);
+       if (ret)
+               goto error;
+
+       /* init the fw <-> mac80211 STA mapping */
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+               RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+       /* Add auxiliary station for scanning */
+       ret = iwl_mvm_add_aux_sta(mvm);
+       if (ret)
+               goto error;
+
+       return 0;
+ error:
+       iwl_trans_stop_device(mvm->trans);
+       return ret;
+}
+
+/* Rx handler for CARD_STATE_NOTIFICATION: just log the RF-kill bits */
+int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb,
+                                   struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+       u32 flags = le32_to_cpu(card_state_notif->flags);
+
+       IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
+                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & CT_KILL_CARD_DISABLED) ?
+                         "Reached" : "Not reached");
+
+       return 0;
+}
+
+/* Rx handler for RADIO_VERSION_NOTIFICATION: log the radio version only */
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                        struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
+
+       /* TODO: what to do with that? */
+       IWL_DEBUG_INFO(mvm,
+                      "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
+                      le32_to_cpu(radio_version->radio_flavor),
+                      le32_to_cpu(radio_version->radio_step),
+                      le32_to_cpu(radio_version->radio_dash));
+       return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
new file mode 100644 (file)
index 0000000..011906e
--- /dev/null
@@ -0,0 +1,134 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/leds.h>
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+
+/* Turn the LED on by writing CSR_LED_REG_TURN_ON to CSR_LED_REG */
+static void iwl_mvm_led_enable(struct iwl_mvm *mvm)
+{
+       iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
+}
+
+/* Turn the LED off by writing CSR_LED_REG_TURN_OFF to CSR_LED_REG */
+static void iwl_mvm_led_disable(struct iwl_mvm *mvm)
+{
+       iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
+}
+
+/*
+ * led_classdev brightness_set callback: the LED is binary
+ * (max_brightness is 1), so any non-zero brightness means on.
+ */
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
+{
+       struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
+       if (brightness > 0)
+               iwl_mvm_led_enable(mvm);
+       else
+               iwl_mvm_led_disable(mvm);
+}
+
+/*
+ * Register the mvm LED class device, honouring the led_mode module
+ * parameter: IWL_LED_DISABLE skips registration entirely, and
+ * IWL_LED_DEFAULT behaves like IWL_LED_RF_STATE (LED tied to the
+ * mac80211 radio LED trigger).
+ *
+ * Returns 0 on success or when LEDs are disabled, -EINVAL for an
+ * unknown mode, -ENOMEM if the LED name allocation fails, or the
+ * error from led_classdev_register().
+ */
+int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+       int mode = iwlwifi_mod_params.led_mode;
+       int ret;
+
+       switch (mode) {
+       case IWL_LED_DEFAULT:
+       case IWL_LED_RF_STATE:
+               mode = IWL_LED_RF_STATE;
+               break;
+       case IWL_LED_DISABLE:
+               IWL_INFO(mvm, "Led disabled\n");
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+                                  wiphy_name(mvm->hw->wiphy));
+       /* kasprintf() can fail; don't register a nameless LED device */
+       if (!mvm->led.name)
+               return -ENOMEM;
+
+       mvm->led.brightness_set = iwl_led_brightness_set;
+       mvm->led.max_brightness = 1;
+
+       if (mode == IWL_LED_RF_STATE)
+               mvm->led.default_trigger =
+                       ieee80211_get_radio_led_name(mvm->hw);
+
+       ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+       if (ret) {
+               kfree(mvm->led.name);
+               IWL_INFO(mvm, "Failed to enable led\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Unregister the LED class device and free its name. When LEDs were
+ * disabled via the module parameter, iwl_mvm_leds_init() registered
+ * nothing and allocated nothing, so bail out early.
+ */
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+       if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
+               return;
+
+       led_classdev_unregister(&mvm->led);
+       kfree(mvm->led.name);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
new file mode 100644 (file)
index 0000000..0854dc3
--- /dev/null
@@ -0,0 +1,955 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+/*
+ * Per-AC TX FIFO mapping, indexed by the AC number used in this file
+ * (iwl_mvm_mac_ctxt_init() and _cmd_common() index it with 0..3).
+ * NOTE(review): order here is BK, BE, VI, VO while mac80211's
+ * IEEE80211_AC_* enum starts at VO — confirm the indexing convention
+ * matches the firmware's expectation.
+ */
+const u8 iwl_mvm_ac_to_tx_fifo[] = {
+       IWL_MVM_TX_FIFO_BK,
+       IWL_MVM_TX_FIFO_BE,
+       IWL_MVM_TX_FIFO_VI,
+       IWL_MVM_TX_FIFO_VO,
+};
+
+/*
+ * Iterator state used when allocating a MAC ID, TSF ID and hardware
+ * queues for a newly added virtual interface.
+ */
+struct iwl_mvm_mac_iface_iterator_data {
+       struct iwl_mvm *mvm;
+       /* the vif being added; skipped if the iterator encounters it */
+       struct ieee80211_vif *vif;
+       /* bitmaps of IDs still free after visiting existing interfaces */
+       unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+       unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+       /* hardware queues already claimed by existing interfaces */
+       unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+       /* TSF to share with an existing vif; NUM_TSF_IDS = no preference */
+       enum iwl_tsf_id preferred_tsf;
+       /* set when the iterator saw data->vif itself (restart/resume) */
+       bool found_vif;
+};
+
+/*
+ * Active-interface iterator: for every existing vif (other than the
+ * one being added, data->vif), mark its hardware queues and MAC ID as
+ * used, and decide whether its TSF ID should be shared with the new
+ * interface (recorded in data->preferred_tsf) or withheld from it
+ * (cleared from data->available_tsf_ids).
+ */
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_mac_iface_iterator_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       u32 ac;
+
+       /* Iterator may already find the interface being added -- skip it */
+       if (vif == data->vif) {
+               data->found_vif = true;
+               return;
+       }
+
+       /* Mark the queues used by the vif */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+               if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+                       __set_bit(vif->hw_queue[ac], data->used_hw_queues);
+
+       if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+               __set_bit(vif->cab_queue, data->used_hw_queues);
+
+       /*
+        * Mark MAC IDs as used by clearing the available bit, and
+        * (below) mark TSFs as used if their existing use is not
+        * compatible with the new interface type.
+        * No locking or atomic bit operations are needed since the
+        * data is on the stack of the caller function.
+        */
+       __clear_bit(mvmvif->id, data->available_mac_ids);
+
+       /*
+        * The TSF is a hardware/firmware resource, there are 4 and
+        * the driver should assign and free them as needed. However,
+        * there are cases where 2 MACs should share the same TSF ID
+        * for the purpose of clock sync, an optimization to avoid
+        * clock drift causing overlapping TBTTs/DTIMs for a GO and
+        * client in the system.
+        *
+        * The firmware will decide according to the MAC type which
+        * will be the master and slave. Clients that need to sync
+        * with a remote station will be the master, and an AP or GO
+        * will be the slave.
+        *
+        * Depending on the new interface type it can be slaved to
+        * or become the master of an existing interface.
+        */
+       switch (data->vif->type) {
+       case NL80211_IFTYPE_STATION:
+               /*
+                * The new interface is client, so if the existing one
+                * we're iterating is an AP, the TSF should be used to
+                * avoid drift between the new client and existing AP,
+                * the existing AP will get drift updates from the new
+                * client context in this case
+                */
+               if (vif->type == NL80211_IFTYPE_AP) {
+                       if (data->preferred_tsf == NUM_TSF_IDS &&
+                           test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+                               data->preferred_tsf = mvmvif->tsf_id;
+                       return;
+               }
+               break;
+       case NL80211_IFTYPE_AP:
+               /*
+                * The new interface is AP/GO, so should get drift
+                * updates from an existing client or use the same
+                * TSF as an existing GO. There's no drift between
+                * TSFs internally but if they used different TSFs
+                * then a new client MAC could update one of them
+                * and cause drift that way.
+                */
+               if (vif->type == NL80211_IFTYPE_STATION ||
+                   vif->type == NL80211_IFTYPE_AP) {
+                       if (data->preferred_tsf == NUM_TSF_IDS &&
+                           test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+                               data->preferred_tsf = mvmvif->tsf_id;
+                       return;
+               }
+               break;
+       default:
+               /*
+                * For all other interface types there's no need to
+                * take drift into account. Either they're exclusive
+                * like IBSS and monitor, or we don't care much about
+                * their TSF (like P2P Device), but we won't be able
+                * to share the TSF resource.
+                */
+               break;
+       }
+
+       /*
+        * Unless we exited above, we can't share the TSF resource
+        * that the virtual interface we're iterating over is using
+        * with the new one, so clear the available bit and if this
+        * was the preferred one, reset that as well.
+        */
+       __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+
+       if (data->preferred_tsf == mvmvif->tsf_id)
+               data->preferred_tsf = NUM_TSF_IDS;
+}
+
+/*
+ * Get the mask of the hardware queues used by the vif.
+ *
+ * A P2P Device interface only uses the dedicated offchannel queue;
+ * otherwise the mask covers the per-AC queues plus the CAB queue
+ * when one is assigned.
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+                               struct ieee80211_vif *vif)
+{
+       u32 qmask, ac;
+
+       /* use the same constant as the rest of this file */
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+               return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+       qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
+               BIT(vif->cab_queue) : 0;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+               if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+                       qmask |= BIT(vif->hw_queue[ac]);
+
+       return qmask;
+}
+
+/*
+ * Allocate a MAC ID, a TSF ID and hardware data queues for the new
+ * vif. If the iterator finds the vif already present (resume or HW
+ * restart), the previously assigned IDs are kept. On failure the
+ * mvmvif state is zeroed and the vif's queue assignments invalidated.
+ * Returns 0 on success, -EIO when no free ID or queue is available.
+ */
+static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
+                                              struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_mac_iface_iterator_data data = {
+               .mvm = mvm,
+               .vif = vif,
+               .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
+               .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+               /* no preference yet */
+               .preferred_tsf = NUM_TSF_IDS,
+               .used_hw_queues = {
+                       BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+                       BIT(IWL_MVM_AUX_QUEUE) |
+                       BIT(IWL_MVM_CMD_QUEUE)
+               },
+               .found_vif = false,
+       };
+       u32 ac;
+       int ret;
+
+       /*
+        * Allocate a MAC ID and a TSF for this MAC, along with the queues
+        * and other resources.
+        */
+
+       /*
+        * Before the iterator, we start with all MAC IDs and TSFs available.
+        *
+        * During iteration, all MAC IDs are cleared that are in use by other
+        * virtual interfaces, and all TSF IDs are cleared that can't be used
+        * by this new virtual interface because they're used by an interface
+        * that can't share it with the new one.
+        * At the same time, we check if there's a preferred TSF in the case
+        * that we should share it with another interface.
+        */
+
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+               iwl_mvm_mac_iface_iterator, &data);
+
+       /*
+        * In the case we're getting here during resume, it's similar to
+        * firmware restart, and with RESUME_ALL the iterator will find
+        * the vif being added already.
+        * We don't want to reassign any IDs in either case since doing
+        * so would probably assign different IDs (as interfaces aren't
+        * necessarily added in the same order), but the old IDs were
+        * preserved anyway, so skip ID assignment for both resume and
+        * recovery.
+        */
+       if (data.found_vif)
+               return 0;
+
+       /* Therefore, in recovery, we can't get here */
+       WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+       mvmvif->id = find_first_bit(data.available_mac_ids,
+                                   NUM_MAC_INDEX_DRIVER);
+       if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+               IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+               ret = -EIO;
+               goto exit_fail;
+       }
+
+       /* Take the preferred (shared) TSF if any, else the first free one */
+       if (data.preferred_tsf != NUM_TSF_IDS)
+               mvmvif->tsf_id = data.preferred_tsf;
+       else
+               mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+                                               NUM_TSF_IDS);
+       if (mvmvif->tsf_id == NUM_TSF_IDS) {
+               IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+               ret = -EIO;
+               goto exit_fail;
+       }
+
+       mvmvif->color = 0;
+
+       /* No need to allocate data queues to P2P Device MAC.*/
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+                       vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+
+               return 0;
+       }
+
+       /* Find available queues, and allocate them to the ACs */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               u8 queue = find_first_zero_bit(data.used_hw_queues,
+                                              IWL_MVM_FIRST_AGG_QUEUE);
+
+               if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+                       IWL_ERR(mvm, "Failed to allocate queue\n");
+                       ret = -EIO;
+                       goto exit_fail;
+               }
+
+               __set_bit(queue, data.used_hw_queues);
+               vif->hw_queue[ac] = queue;
+       }
+
+       /* Allocate the CAB queue for softAP and GO interfaces */
+       if (vif->type == NL80211_IFTYPE_AP) {
+               u8 queue = find_first_zero_bit(data.used_hw_queues,
+                                              IWL_MVM_FIRST_AGG_QUEUE);
+
+               if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+                       IWL_ERR(mvm, "Failed to allocate cab queue\n");
+                       ret = -EIO;
+                       goto exit_fail;
+               }
+
+               vif->cab_queue = queue;
+       } else {
+               vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+       }
+
+       mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
+       mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+       INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+       mvmvif->time_event_data.id = TE_MAX;
+
+       return 0;
+
+exit_fail:
+       memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+       memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+       vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+       return ret;
+}
+
+/*
+ * Initialize the MAC context for a vif: allocate IDs/queues via
+ * iwl_mvm_mac_ctxt_allocate_resources() and enable the matching
+ * transport queues. Must be called with mvm->mutex held.
+ * Returns 0 on success or the resource-allocation error.
+ */
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       u32 ac;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
+       if (ret)
+               return ret;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
+                                       IWL_MVM_TX_FIFO_VO);
+               break;
+       case NL80211_IFTYPE_AP:
+               /* AP additionally enables the CAB queue before the AC ones */
+               iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
+                                       IWL_MVM_TX_FIFO_VO);
+               /* fall through */
+       default:
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+                       iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac],
+                                               iwl_mvm_ac_to_tx_fifo[ac]);
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * Release the MAC context: disable the transport queues that
+ * iwl_mvm_mac_ctxt_init() enabled for this vif (same switch shape,
+ * including the AP fall-through for the CAB queue).
+ * Must be called with mvm->mutex held.
+ */
+void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       int ac;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE);
+               break;
+       case NL80211_IFTYPE_AP:
+               iwl_trans_txq_disable(mvm->trans, vif->cab_queue);
+               /* fall through */
+       default:
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+                       iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]);
+       }
+}
+
+/*
+ * Compute the CCK and OFDM ACK/CTS rate bitmaps for the vif's basic
+ * rate set on the given band, then add the mandatory lower rates
+ * required by 802.11-2007 9.6 (see the long comments below).
+ * Results are returned through *cck_rates and *ofdm_rates.
+ */
+static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             enum ieee80211_band band,
+                             u8 *cck_rates, u8 *ofdm_rates)
+{
+       struct ieee80211_supported_band *sband;
+       unsigned long basic = vif->bss_conf.basic_rates;
+       int lowest_present_ofdm = 100;
+       int lowest_present_cck = 100;
+       u8 cck = 0;
+       u8 ofdm = 0;
+       int i;
+
+       sband = mvm->hw->wiphy->bands[band];
+
+       for_each_set_bit(i, &basic, BITS_PER_LONG) {
+               int hw = sband->bitrates[i].hw_value;
+               if (hw >= IWL_FIRST_OFDM_RATE) {
+                       ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+                       if (lowest_present_ofdm > hw)
+                               lowest_present_ofdm = hw;
+               } else {
+                       BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+                       cck |= BIT(hw);
+                       if (lowest_present_cck > hw)
+                               lowest_present_cck = hw;
+               }
+       }
+
+       /*
+        * Now we've got the basic rates as bitmaps in the ofdm and cck
+        * variables. This isn't sufficient though, as there might not
+        * be all the right rates in the bitmap. E.g. if the only basic
+        * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+        * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+        *
+        *    [...] a STA responding to a received frame shall transmit
+        *    its Control Response frame [...] at the highest rate in the
+        *    BSSBasicRateSet parameter that is less than or equal to the
+        *    rate of the immediately previous frame in the frame exchange
+        *    sequence ([...]) and that is of the same modulation class
+        *    ([...]) as the received frame. If no rate contained in the
+        *    BSSBasicRateSet parameter meets these conditions, then the
+        *    control frame sent in response to a received frame shall be
+        *    transmitted at the highest mandatory rate of the PHY that is
+        *    less than or equal to the rate of the received frame, and
+        *    that is of the same modulation class as the received frame.
+        *
+        * As a consequence, we need to add all mandatory rates that are
+        * lower than all of the basic rates to these bitmaps.
+        */
+
+       if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+               ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+       if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+               ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+       /* 6M already there or needed so always add */
+       ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+       /*
+        * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+        * Note, however:
+        *  - if no CCK rates are basic, it must be ERP since there must
+        *    be some basic rates at all, so they're OFDM => ERP PHY
+        *    (or we're in 5 GHz, and the cck bitmap will never be used)
+        *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+        *  - if 5.5M is basic, 1M and 2M are mandatory
+        *  - if 2M is basic, 1M is mandatory
+        *  - if 1M is basic, that's the only valid ACK rate.
+        * As a consequence, it's not as complicated as it sounds, just add
+        * any lower rates to the ACK rate bitmap.
+        */
+       if (IWL_RATE_11M_INDEX < lowest_present_cck)
+               cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+       if (IWL_RATE_5M_INDEX < lowest_present_cck)
+               cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+       if (IWL_RATE_2M_INDEX < lowest_present_cck)
+               cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+       /* 1M already there or needed so always add */
+       cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+       *cck_rates = cck;
+       *ofdm_rates = ofdm;
+}
+
+/*
+ * Fill the fields of a MAC context command that are common to all
+ * MAC types: id/color, action, FW MAC type, TSF ID, addresses, ACK
+ * rates, preamble/slot flags, per-AC EDCA parameters, QoS/protection
+ * flags and the group filter flag.
+ */
+static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       struct iwl_mac_ctx_cmd *cmd,
+                                       u32 action)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_chanctx_conf *chanctx;
+       u8 cck_ack_rates, ofdm_ack_rates;
+       int i;
+
+       cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                           mvmvif->color));
+       cmd->action = cpu_to_le32(action);
+
+       /* Map the mac80211 interface type to the firmware MAC type */
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               if (vif->p2p)
+                       cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
+               else
+                       cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
+               break;
+       case NL80211_IFTYPE_AP:
+               cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+
+       cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+
+       memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+       if (vif->bss_conf.bssid)
+               memcpy(cmd->bssid_addr, vif->bss_conf.bssid, ETH_ALEN);
+       else
+               eth_broadcast_addr(cmd->bssid_addr);
+
+       /* without a channel context, default the ACK rates to 2.4 GHz */
+       rcu_read_lock();
+       chanctx = rcu_dereference(vif->chanctx_conf);
+       iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
+                                           : IEEE80211_BAND_2GHZ,
+                         &cck_ack_rates, &ofdm_ack_rates);
+       rcu_read_unlock();
+
+       cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
+       cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
+
+       cmd->cck_short_preamble =
+               cpu_to_le32(vif->bss_conf.use_short_preamble ?
+                           MAC_FLG_SHORT_PREAMBLE : 0);
+       cmd->short_slot =
+               cpu_to_le32(vif->bss_conf.use_short_slot ?
+                           MAC_FLG_SHORT_SLOT : 0);
+
+       for (i = 0; i < AC_NUM; i++) {
+               cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
+               cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
+               cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs;
+               cmd->ac[i].edca_txop =
+                       cpu_to_le16(mvmvif->queue_params[i].txop * 32);
+               cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+       }
+
+       if (vif->bss_conf.qos)
+               cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+       if (vif->bss_conf.use_cts_prot)
+               cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT |
+                                                    MAC_PROT_FLG_SELF_CTS_EN);
+
+       /*
+        * I think that we should enable these 2 flags regardless the HT PROT
+        * fields in the HT IE, but I am not sure. Someone knows whom to ask?...
+        */
+       if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+               cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+               cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
+                                                    MAC_PROT_FLG_FAT_PROT);
+       }
+
+       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+}
+
+/*
+ * Send a MAC context command to the firmware synchronously,
+ * logging the action and error code on failure.
+ */
+static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+                                    struct iwl_mac_ctx_cmd *cmd)
+{
+       int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+                                      sizeof(*cmd), cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
+                       le32_to_cpu(cmd->action), ret);
+       return ret;
+}
+
+/*
+ * Fill the specific data for a mac context of type station or p2p
+ * client: association state, beacon/DTIM intervals (with their
+ * firmware reciprocals), listen interval and AID.
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         struct iwl_mac_data_sta *ctxt_sta)
+{
+       /* We need the dtim_period to set the MAC as associated */
+       if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+               ctxt_sta->is_assoc = cpu_to_le32(1);
+       else
+               ctxt_sta->is_assoc = cpu_to_le32(0);
+
+       ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+       ctxt_sta->bi_reciprocal =
+               cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+       /* DTIM interval in TUs: beacon interval times the DTIM period */
+       ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+                                             vif->bss_conf.dtim_period);
+       ctxt_sta->dtim_reciprocal =
+               cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+                                              vif->bss_conf.dtim_period));
+
+       ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+       ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+}
+
+/* Build and send the MAC context command for a (non-P2P) station vif */
+static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       u32 action)
+{
+       struct iwl_mac_ctx_cmd cmd = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
+
+       /* Fill the common data for all mac context types */
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       /* Fill the data specific for station mode */
+       iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
+
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+/* Build and send the MAC context command for a P2P client vif */
+static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
+                                          struct ieee80211_vif *vif,
+                                          u32 action)
+{
+       struct iwl_mac_ctx_cmd cmd = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
+
+       /* Fill the common data for all mac context types */
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       /* P2P client reuses the station context data, plus the CT window */
+       iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
+
+       cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
+
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+/* Build and send the MAC context command for a monitor (listener) vif */
+static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif,
+                                        u32 action)
+{
+       struct iwl_mac_ctx_cmd cmd = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+       /* No other data to be filled */
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+/* Result of the GO iterator: is any P2P GO with an active AP MAC up? */
+struct iwl_mvm_go_iterator_data {
+       bool go_active;
+};
+
+/* Sets go_active when vif is a P2P GO whose AP MAC is currently active */
+static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_go_iterator_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+               data->go_active = true;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+                                          struct ieee80211_vif *vif,
+                                          u32 action)
+{
+       struct iwl_mac_ctx_cmd cmd = {};
+       struct iwl_mvm_go_iterator_data data = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC);
+
+       /*
+        * This flag should be set to true when the P2P Device is
+        * discoverable and there is at least another active P2P GO. Settings
+        * this flag will allow the P2P Device to be discoverable on other
+        * channels in addition to its listen channel.
+        * Note that this flag should not be set in other cases as it opens the
+        * Rx filters on all MAC and increases the number of interrupts.
+        */
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+               iwl_mvm_go_iterator, &data);
+
+       cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+                                    struct iwl_mac_beacon_cmd *beacon_cmd,
+                                    u8 *beacon, u32 frame_size)
+{
+       u32 tim_idx;
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+       /* The index is relative to frame start but we start looking at the
+        * variable-length part of the beacon. */
+       tim_idx = mgmt->u.beacon.variable - beacon;
+
+       /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+       while ((tim_idx < (frame_size - 2)) &&
+                       (beacon[tim_idx] != WLAN_EID_TIM))
+               tim_idx += beacon[tim_idx+1] + 2;
+
+       /* If TIM field was found, set variables */
+       if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+               beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
+               beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
+       } else {
+               IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+       }
+}
+
/*
 * Build and send an asynchronous BEACON_TEMPLATE_CMD for the given vif.
 * The command is sent in two fragments: the fixed beacon command, and the
 * beacon frame itself (duplicated by the transport, as it lives in an skb).
 */
static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct sk_buff *beacon)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_host_cmd cmd = {
		.id = BEACON_TEMPLATE_CMD,
		.flags = CMD_ASYNC,
	};
	struct iwl_mac_beacon_cmd beacon_cmd = {};
	struct ieee80211_tx_info *info;
	u32 beacon_skb_len;
	u32 rate;

	if (WARN_ON(!beacon))
		return -EINVAL;

	beacon_skb_len = beacon->len;

	/* TODO: for now the beacon template id is set to be the mac context id.
	 * Might be better to handle it as another resource ... */
	beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);

	/* Set up TX command fields */
	beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
	beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
	beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	beacon_cmd.tx.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					     TX_CMD_FLG_BT_DIS  |
					     TX_CMD_FLG_TSF);

	/* Rotate the antenna used for management frames across valid ones */
	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
				     mvm->mgmt_last_antenna_idx);

	beacon_cmd.tx.rate_n_flags =
		cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
			    RATE_MCS_ANT_POS);

	info = IEEE80211_SKB_CB(beacon);

	/* CCK rates only exist on 2.4 GHz and are not allowed for P2P */
	if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
		rate = IWL_FIRST_OFDM_RATE;
	} else {
		rate = IWL_FIRST_CCK_RATE;
		beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
	}
	beacon_cmd.tx.rate_n_flags |=
		cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));

	/* Locate the TIM element so the firmware can keep it updated */
	iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
				 beacon->data,
				 beacon_skb_len);

	/* Submit command: fragment 0 is the command, fragment 1 the frame */
	cmd.len[0] = sizeof(beacon_cmd);
	cmd.data[0] = &beacon_cmd;
	cmd.dataflags[0] = 0;
	cmd.len[1] = beacon_skb_len;
	cmd.data[1] = beacon->data;
	cmd.dataflags[1] = IWL_HCMD_DFL_DUP;

	return iwl_mvm_send_cmd(mvm, &cmd);
}
+
+/* The beacon template for the AP/GO context has changed and needs update */
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif)
+{
+       struct sk_buff *beacon;
+       int ret;
+
+       WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+       beacon = ieee80211_beacon_get(mvm->hw, vif);
+       if (!beacon)
+               return -ENOMEM;
+
+       ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
+       dev_kfree_skb(beacon);
+       return ret;
+}
+
+/*
+ * Fill the specific data for mac context of type AP of P2P GO
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif,
+                                        struct iwl_mac_data_ap *ctxt_ap)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       u32 curr_dev_time;
+
+       ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+       ctxt_ap->bi_reciprocal =
+               cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+       ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+                                            vif->bss_conf.dtim_period);
+       ctxt_ap->dtim_reciprocal =
+               cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+                                              vif->bss_conf.dtim_period));
+
+       ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+       curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+       ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time);
+
+       ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time);
+
+       /* TODO: Assume that the beacon id == mac context id */
+       ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  u32 action)
+{
+       struct iwl_mac_ctx_cmd cmd = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+
+       /* Fill the common data for all mac context types */
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       /* Fill the data specific for ap mode */
+       iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap);
+
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  u32 action)
+{
+       struct iwl_mac_ctx_cmd cmd = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+
+       /* Fill the common data for all mac context types */
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       /* Fill the data specific for GO mode */
+       iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap);
+
+       cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
+       cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps);
+
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               u32 action)
+{
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               if (!vif->p2p)
+                       return iwl_mvm_mac_ctxt_cmd_station(mvm, vif,
+                                                           action);
+               else
+                       return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
+                                                              action);
+               break;
+       case NL80211_IFTYPE_AP:
+               if (!vif->p2p)
+                       return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+               else
+                       return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+       case NL80211_IFTYPE_P2P_DEVICE:
+               return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+       default:
+               break;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+                     vif->addr, ieee80211_vif_type_p2p(vif)))
+               return -EIO;
+
+       ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD);
+       if (ret)
+               return ret;
+
+       mvmvif->uploaded = true;
+       return 0;
+}
+
/*
 * Push the current vif settings to the firmware by modifying its already
 * uploaded MAC context.
 */
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* Only a context that was previously uploaded can be modified */
	if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY);
}
+
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mac_ctx_cmd cmd;
+       int ret;
+
+       if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+                     vif->addr, ieee80211_vif_type_p2p(vif)))
+               return -EIO;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                          mvmvif->color));
+       cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+                                  sizeof(cmd), &cmd);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
+               return ret;
+       }
+
+       mvmvif->uploaded = false;
+       return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
new file mode 100644 (file)
index 0000000..e27eb97
--- /dev/null
@@ -0,0 +1,1316 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-io.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-event.h"
+#include "iwl-eeprom-parse.h"
+#include "fw-api-scan.h"
+#include "iwl-phy-db.h"
+
/* Interface concurrency limits advertised to cfg80211/mac80211 */
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
	{
		/* one infrastructure interface: station or AP */
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION) |
			BIT(NL80211_IFTYPE_AP),
	},
	{
		/* one P2P connection interface: client or GO */
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		/* one P2P management (device) interface */
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};
+
/*
 * Supported interface combination: up to three interfaces (one from each
 * limit group above), all sharing a single channel.
 */
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
	{
		.num_different_channels = 1,
		.max_interfaces = 3,
		.limits = iwl_mvm_limits,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
	},
};
+
/*
 * Advertise the driver's capabilities to mac80211/cfg80211 and register
 * the hardware. Called once during op-mode start, after the NVM has been
 * parsed (MAC addresses, bands and SKU capabilities are read from it).
 */
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
	struct ieee80211_hw *hw = mvm->hw;
	int num_mac, ret;

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		    IEEE80211_HW_QUEUE_CONTROL |
		    IEEE80211_HW_WANT_MONITOR_VIF |
		    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
		    IEEE80211_HW_SUPPORTS_PS |
		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		    IEEE80211_HW_AMPDU_AGGREGATION;

	/* Queues below IWL_FIRST_AMPDU_QUEUE are the regular AC queues */
	hw->queues = IWL_FIRST_AMPDU_QUEUE;
	hw->offchannel_tx_hw_queue = IWL_OFFCHANNEL_QUEUE;
	hw->rate_control_algorithm = "iwl-mvm-rs";

	/*
	 * Enable 11w if advertised by firmware and software crypto
	 * is not enabled (as the firmware will interpret some mgmt
	 * packets, so enabling it with software crypto isn't safe)
	 */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
	    !iwlwifi_mod_params.sw_crypto)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	/* Per-station/vif/chanctx private areas used by this driver */
	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_DEVICE);

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
			    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
	hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(iwl_mvm_iface_combinations);

	hw->wiphy->max_remain_on_channel_duration = 500;
	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Extract MAC address */
	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
	hw->wiphy->addresses = mvm->addresses;
	hw->wiphy->n_addresses = 1;

	/* A second address (first + 1 in the last octet) if the NVM has one */
	num_mac = mvm->nvm_data->n_hw_addrs;
	if (num_mac > 1) {
		memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr,
		       ETH_ALEN);
		mvm->addresses[1].addr[5]++;
		hw->wiphy->n_addresses++;
	}

	/* we create the 802.11 header and a max-length SSID element */
	hw->wiphy->max_scan_ie_len =
		mvm->fw->ucode_capa.max_probe_length - 24 - 34;
	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	/* Only expose bands that actually have channels in the NVM */
	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];

	hw->wiphy->hw_version = mvm->trans->hw_id;

	/* Default power-save policy comes from the module parameter */
	if (iwlwifi_mod_params.power_save)
		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
			       NL80211_FEATURE_P2P_GO_OPPPS;

	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Advertise WoWLAN only when a WoWLAN firmware image exists, the
	 * transport implements D3 suspend/resume and the platform device
	 * is wakeup-capable.
	 */
	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
	    mvm->trans->ops->d3_suspend &&
	    mvm->trans->ops->d3_resume &&
	    device_can_wakeup(mvm->trans->dev)) {
		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
					  WIPHY_WOWLAN_DISCONNECT |
					  WIPHY_WOWLAN_EAP_IDENTITY_REQ |
					  WIPHY_WOWLAN_RFKILL_RELEASE;
		if (!iwlwifi_mod_params.sw_crypto)
			hw->wiphy->wowlan.flags |=
				WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
				WIPHY_WOWLAN_GTK_REKEY_FAILURE |
				WIPHY_WOWLAN_4WAY_HANDSHAKE;

		hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
		hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
		hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
	}
#endif

	ret = iwl_mvm_leds_init(mvm);
	if (ret)
		return ret;

	return ieee80211_register_hw(mvm->hw);
}
+
+static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+                          struct ieee80211_tx_control *control,
+                          struct sk_buff *skb)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) {
+               IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n");
+               goto drop;
+       }
+
+       if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_OFFCHANNEL_QUEUE &&
+           !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+               goto drop;
+
+       if (control->sta) {
+               if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+                       goto drop;
+               return;
+       }
+
+       if (iwl_mvm_tx_skb_non_sta(mvm, skb))
+               goto drop;
+       return;
+ drop:
+       ieee80211_free_txskb(hw, skb);
+}
+
/*
 * mac80211 ampdu_action handler: start/stop RX and TX A-MPDU aggregation
 * sessions for the given station and TID. All transitions are serialized
 * under mvm->mutex.
 */
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta, u16 tid,
				    u16 *ssn, u8 buf_size)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
		     sta->addr, tid, action);

	/* Aggregation requires 11n support in the NVM SKU */
	if (!(mvm->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	mutex_lock(&mvm->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* Honor the module parameter that disables RX aggregation */
		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
		break;
	case IEEE80211_AMPDU_TX_START:
		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* All TX-stop variants share the same teardown path here */
		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&mvm->mutex);

	return ret;
}
+
/*
 * Per-vif cleanup run after a firmware restart: reset the driver-private
 * vif state so the contexts get re-uploaded from scratch.
 */
static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->uploaded = false;
	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

	/* does this make sense at all? */
	mvmvif->color++;

	/* Clear any pending time event under its lock */
	spin_lock_bh(&mvm->time_event_lock);
	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/* P2P_DEVICE keeps its dedicated ROC PHY context pointer */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvmvif->phy_ctxt = NULL;
}
+
/*
 * Bring the device down and reset driver and mac80211 state after a
 * firmware restart, before the firmware is loaded again in mac_start.
 */
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
	iwl_trans_stop_device(mvm->trans);
	iwl_trans_stop_hw(mvm->trans, false);

	mvm->scan_status = IWL_MVM_SCAN_NONE;

	/* just in case one was running */
	ieee80211_remain_on_channel_expired(mvm->hw);

	/* Reset the per-vif driver state */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_cleanup_iterator, mvm);

	/* Forget all firmware key slots and pending station drains */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));

	ieee80211_wake_queues(mvm->hw);

	mvm->vif_count = 0;
}
+
+static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+
+       /* Clean up some internal and mac80211 state on restart */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               iwl_mvm_restart_cleanup(mvm);
+
+       ret = iwl_mvm_up(mvm);
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+
+       clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+       ret = iwl_mvm_update_quotas(mvm, NULL);
+       if (ret)
+               IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
+                       ret);
+
+       mutex_unlock(&mvm->mutex);
+}
+
/*
 * mac80211 stop handler: stop the firmware and the hardware. The ordering
 * of the flush/cancel calls around the mutex is deliberate — see the
 * inline comments.
 */
static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* Let any queued async notifications finish before we lock */
	flush_work(&mvm->async_handlers_wk);

	mutex_lock(&mvm->mutex);
	/* async_handlers_wk is now blocked */

	/*
	 * The work item could be running or queued if the
	 * ROC time event stops just as we get here.
	 */
	cancel_work_sync(&mvm->roc_done_wk);

	iwl_trans_stop_device(mvm->trans);
	iwl_trans_stop_hw(mvm->trans, false);

	iwl_mvm_async_handlers_purge(mvm);
	/* async_handlers_list is empty and will stay empty: HW is stopped */

	/* the fw is stopped, the aux sta is dead: clean up driver state */
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	mutex_unlock(&mvm->mutex);

	/*
	 * The worker might have been waiting for the mutex, let it run and
	 * discover that its list is now empty.
	 */
	cancel_work_sync(&mvm->async_handlers_wk);
}
+
+static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = data;
+       int ret;
+
+       ret = iwl_mvm_power_disable(mvm, vif);
+       if (ret)
+               IWL_ERR(mvm, "failed to disable power management\n");
+}
+
/* Interface iterator: recompute and apply the power mode for a vif */
static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = data;

	iwl_mvm_power_update_mode(mvm, vif);
}
+
/*
 * mac80211 add_interface handler: allocate driver resources for the new
 * vif and upload its MAC context. For AP/GO interfaces only resources are
 * allocated here — the MAC upload is deferred to the start_ap flow (see
 * the inline comment). P2P_DEVICE additionally gets a dedicated PHY
 * context, binding and broadcast station. Errors unwind via the goto
 * cleanup chain at the bottom.
 */
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	/*
	 * Not much to do here. The stack will not allow interface
	 * types or combinations that we didn't advertise, so we
	 * don't really have to check the types.
	 */

	mutex_lock(&mvm->mutex);

	/* Allocate resources for the MAC context, and add it to the fw */
	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
	if (ret)
		goto out_unlock;

	/*
	 * The AP binding flow can be done only after the beacon
	 * template is configured (which happens only in the mac80211
	 * start_ap() flow), and adding the broadcast station can happen
	 * only after the binding.
	 * In addition, since modifying the MAC before adding a bcast
	 * station is not allowed by the FW, delay the adding of MAC context to
	 * the point where we can also add the bcast station.
	 * In short: there's not much we can do at this point, other than
	 * allocating resources :)
	 */
	if (vif->type == NL80211_IFTYPE_AP) {
		u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
					       qmask);
		if (ret) {
			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
			goto out_release;
		}

		goto out_unlock;
	}

	/*
	 * TODO: remove this temporary code.
	 * Currently MVM FW supports power management only on single MAC.
	 * Iterate and disable PM on all active interfaces.
	 * Note: the method below does not count the new interface being added
	 * at this moment.
	 */
	mvm->vif_count++;
	if (mvm->vif_count > 1) {
		IWL_DEBUG_MAC80211(mvm,
				   "Disable power on existing interfaces\n");
		ieee80211_iterate_active_interfaces_atomic(
					    mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_pm_disable_iterator, mvm);
	}

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_release;

	/*
	 * Update power state on the new interface. Admittedly, based on
	 * mac80211 logic this power update will disable power management
	 */
	iwl_mvm_power_update_mode(mvm, vif);

	/*
	 * P2P_DEVICE interface does not have a channel context assigned to it,
	 * so a dedicated PHY context is allocated to it and the corresponding
	 * MAC context is bound to it at this stage.
	 */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		struct ieee80211_channel *chan;
		struct cfg80211_chan_def chandef;

		mvmvif->phy_ctxt = &mvm->phy_ctxt_roc;

		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten as part of the ROC flow.
		 * For now use the first channel we have.
		 */
		chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
		cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
		ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
					   &chandef, 1, 1);
		if (ret)
			goto out_remove_mac;

		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (ret)
			goto out_remove_phy;

		ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
		if (ret)
			goto out_unbind;

		/* Save a pointer to p2p device vif, so it can later be used to
		 * update the p2p device MAC when a GO is started/stopped */
		mvm->p2p_device_vif = vif;
	}

	goto out_unlock;

 out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
 out_remove_phy:
	iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
 out_remove_mac:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_mac_ctxt_remove(mvm, vif);
 out_release:
	/*
	 * TODO: remove this temporary code.
	 * Currently MVM FW supports power management only on single MAC.
	 * Check if only one additional interface remains after releasing
	 * current one. Update power mode on the remaining interface.
	 */
	mvm->vif_count--;
	IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
			   mvm->vif_count);
	if (mvm->vif_count == 1) {
		ieee80211_iterate_active_interfaces(
					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
					iwl_mvm_power_update_iterator, mvm);
	}
	iwl_mvm_mac_ctxt_release(mvm, vif);
 out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}
+
+/*
+ * mac80211 .remove_interface callback: tear down everything that
+ * iwl_mvm_mac_add_interface set up for this vif, in reverse order.
+ * Queues are flushed and pending work items are drained *before* taking
+ * mvm->mutex, because those work items take the mutex themselves.
+ */
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       u32 tfd_msk = 0, ac;
+
+       /* Build the mask of hardware TFD queues used by this vif (one per AC
+        * plus the content-after-beacon queue) so they can all be flushed. */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+               if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+                       tfd_msk |= BIT(vif->hw_queue[ac]);
+
+       if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+               tfd_msk |= BIT(vif->cab_queue);
+
+       if (tfd_msk) {
+               mutex_lock(&mvm->mutex);
+               iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+               mutex_unlock(&mvm->mutex);
+       }
+
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               /*
+                * Flush the ROC worker which will flush the OFFCHANNEL queue.
+                * We assume here that all the packets sent to the OFFCHANNEL
+                * queue are sent in ROC session.
+                */
+               flush_work(&mvm->roc_done_wk);
+       } else {
+               /*
+                * By now, all the AC queues are empty. The AGG queues are
+                * empty too. We already got all the Tx responses for all the
+                * packets in the queues. The drain work can have been
+                * triggered. Flush it. This work item takes the mutex, so kill
+                * it before we take it.
+                */
+               flush_work(&mvm->sta_drained_wk);
+       }
+
+       mutex_lock(&mvm->mutex);
+
+       /*
+        * For AP/GO interface, the tear down of the resources allocated to the
+        * interface should be handled as part of the bss_info_changed flow.
+        */
+       if (vif->type == NL80211_IFTYPE_AP) {
+               iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
+               goto out_release;
+       }
+
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               mvm->p2p_device_vif = NULL;
+               iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+               iwl_mvm_binding_remove_vif(mvm, vif);
+               iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+               mvmvif->phy_ctxt = NULL;
+       }
+
+       /*
+        * TODO: remove this temporary code.
+        * Currently MVM FW supports power management only on single MAC.
+        * Check if only one additional interface remains after removing
+        * current one. Update power mode on the remaining interface.
+        */
+       if (mvm->vif_count)
+               mvm->vif_count--;
+       IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+                          mvm->vif_count);
+       if (mvm->vif_count == 1) {
+               ieee80211_iterate_active_interfaces(
+                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_power_update_iterator, mvm);
+       }
+
+       iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+out_release:
+       iwl_mvm_mac_ctxt_release(mvm, vif);
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .config callback. Intentionally a no-op: all configuration the
+ * MVM driver cares about arrives through bss_info_changed / chanctx ops.
+ */
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+       return 0;
+}
+
+/*
+ * mac80211 .configure_filter callback. Clearing *total_flags tells
+ * mac80211 that none of the requested RX filter flags are honored by
+ * this driver.
+ */
+static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+                                    unsigned int changed_flags,
+                                    unsigned int *total_flags,
+                                    u64 multicast)
+{
+       *total_flags = 0;
+}
+
+/*
+ * Handle BSS info changes for a station (managed) vif: push the updated
+ * MAC context to the firmware, then react to association state, DTIM
+ * period and powersave changes. Caller (iwl_mvm_bss_info_changed) holds
+ * mvm->mutex.
+ */
+static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
+                                            struct ieee80211_vif *vif,
+                                            struct ieee80211_bss_conf *bss_conf,
+                                            u32 changes)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       /* The MAC context is refreshed unconditionally; failures are only
+        * logged since there is no way to report them to mac80211 here. */
+       ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+       if (ret)
+               IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               if (bss_conf->assoc) {
+                       /* add quota for this interface */
+                       ret = iwl_mvm_update_quotas(mvm, vif);
+                       if (ret) {
+                               IWL_ERR(mvm, "failed to update quotas\n");
+                               return;
+                       }
+               } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+                       /* remove AP station now that the MAC is unassoc */
+                       ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
+                       if (ret)
+                               IWL_ERR(mvm, "failed to remove AP station\n");
+                       mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+                       /* remove quota for this interface */
+                       ret = iwl_mvm_update_quotas(mvm, NULL);
+                       if (ret)
+                               IWL_ERR(mvm, "failed to update quotas\n");
+               }
+       } else if (changes & BSS_CHANGED_DTIM_PERIOD) {
+               /*
+                * We received a beacon _after_ association so
+                * remove the session protection.
+                */
+               iwl_mvm_remove_time_event(mvm, mvmvif,
+                                         &mvmvif->time_event_data);
+       } else if (changes & BSS_CHANGED_PS) {
+               /*
+                * TODO: remove this temporary code.
+                * Currently MVM FW supports power management only on single
+                * MAC. Avoid power mode update if more than one interface
+                * is active.
+                */
+               IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+                                  mvm->vif_count);
+               if (mvm->vif_count == 1) {
+                       ret = iwl_mvm_power_update_mode(mvm, vif);
+                       if (ret)
+                               IWL_ERR(mvm, "failed to update power mode\n");
+               }
+       }
+}
+
+/*
+ * mac80211 .start_ap callback: bring up an AP/GO interface.
+ * Order matters: beacon template -> MAC context -> binding -> bcast
+ * station -> quotas. On any failure, the steps already taken are undone
+ * in reverse via the goto-cleanup ladder.
+ */
+static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+
+       /* Send the beacon template */
+       ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
+       if (ret)
+               goto out_unlock;
+
+       /* Add the mac context */
+       ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+       if (ret)
+               goto out_unlock;
+
+       /* Perform the binding */
+       ret = iwl_mvm_binding_add_vif(mvm, vif);
+       if (ret)
+               goto out_remove;
+
+       mvmvif->ap_active = true;
+
+       /* Send the bcast station. At this stage the TBTT and DTIM time events
+        * are added and applied to the scheduler */
+       ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
+       if (ret)
+               goto out_unbind;
+
+       ret = iwl_mvm_update_quotas(mvm, vif);
+       if (ret)
+               goto out_rm_bcast;
+
+       /* Need to update the P2P Device MAC */
+       if (vif->p2p && mvm->p2p_device_vif)
+               iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+
+       mutex_unlock(&mvm->mutex);
+       return 0;
+
+out_rm_bcast:
+       iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+out_unbind:
+       iwl_mvm_binding_remove_vif(mvm, vif);
+out_remove:
+       iwl_mvm_mac_ctxt_remove(mvm, vif);
+out_unlock:
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+/*
+ * mac80211 .stop_ap callback: undo iwl_mvm_start_ap in reverse order
+ * (quotas -> bcast station -> binding -> MAC context). Return values of
+ * the teardown calls are ignored; the interface is going down anyway.
+ */
+static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mutex_lock(&mvm->mutex);
+
+       mvmvif->ap_active = false;
+
+       /* Need to update the P2P Device MAC */
+       if (vif->p2p && mvm->p2p_device_vif)
+               iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+
+       iwl_mvm_update_quotas(mvm, NULL);
+       iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+       iwl_mvm_binding_remove_vif(mvm, vif);
+       iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * Handle BSS info changes for an AP vif. Only beacon-data changes are
+ * acted upon; everything else is covered by start_ap/stop_ap. Caller
+ * holds mvm->mutex.
+ */
+static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_bss_conf *bss_conf,
+                                       u32 changes)
+{
+       /* Need to send a new beacon template to the FW */
+       if (changes & BSS_CHANGED_BEACON) {
+               if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+                       IWL_WARN(mvm, "Failed updating beacon data\n");
+       }
+}
+
+/*
+ * mac80211 .bss_info_changed callback: dispatch to the per-interface-type
+ * handler under mvm->mutex. Only station and AP vifs are expected here.
+ */
+static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       mutex_lock(&mvm->mutex);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
+               break;
+       case NL80211_IFTYPE_AP:
+               iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+               break;
+       default:
+               /* shouldn't happen */
+               WARN_ON_ONCE(1);
+       }
+
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .hw_scan callback. Rejects empty or oversized channel lists,
+ * and returns -EBUSY if a scan is already in flight (only one scan is
+ * supported at a time).
+ */
+static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct cfg80211_scan_request *req)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               ret = iwl_mvm_scan_request(mvm, vif, req);
+       else
+               ret = -EBUSY;
+
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+/* mac80211 .cancel_hw_scan callback: abort any ongoing scan. */
+static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       mutex_lock(&mvm->mutex);
+
+       iwl_mvm_cancel_scan(mvm);
+
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .allow_buffered_frames callback: let the firmware release
+ * num_frames buffered frames to a sleeping station. The tid and
+ * more_data arguments are currently unused (see TODO below).
+ */
+static void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+                                 struct ieee80211_sta *sta, u16 tid,
+                                 int num_frames,
+                                 enum ieee80211_frame_release_type reason,
+                                 bool more_data)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+       /* TODO: how do we tell the fw to send frames for a specific TID */
+
+       /*
+        * The fw will send EOSP notification when the last frame will be
+        * transmitted.
+        */
+       iwl_mvm_sta_modify_sleep_tx_count(mvm, mvmsta->sta_id, reason,
+                                         num_frames);
+}
+
+/*
+ * mac80211 .sta_notify callback: mirror a station's sleep/awake
+ * transition into the firmware's station state.
+ */
+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  enum sta_notify_cmd cmd,
+                                  struct ieee80211_sta *sta)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+       switch (cmd) {
+       case STA_NOTIFY_SLEEP:
+               /* Block mac80211 from queueing more frames while Tx to this
+                * station is still pending in the driver. */
+               if (atomic_read(&mvmsta->pending_frames) > 0)
+                       ieee80211_sta_block_awake(hw, sta, true);
+               /*
+                * The fw updates the STA to be asleep. Tx packets on the Tx
+                * queues to this station will not be transmitted. The fw will
+                * send a Tx response with TX_STATUS_FAIL_DEST_PS.
+                */
+               break;
+       case STA_NOTIFY_AWAKE:
+               if (WARN_ON(mvmsta->sta_id == IWL_INVALID_STATION))
+                       break;
+               iwl_mvm_sta_modify_ps_wake(mvm, mvmsta->sta_id);
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * mac80211 .sta_state callback: walk a station through its state
+ * machine. Only adjacent transitions are expected from mac80211; any
+ * other pair returns -EIO. Firmware work happens only on
+ * NOTEXIST<->NONE (add/remove) and AUTH->ASSOC (update + rate init);
+ * the remaining legal transitions are no-ops.
+ */
+static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta,
+                                enum ieee80211_sta_state old_state,
+                                enum ieee80211_sta_state new_state)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
+                          sta->addr, old_state, new_state);
+
+       /* this would be a mac80211 bug ... but don't crash */
+       if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+               return -EINVAL;
+
+       /* if a STA is being removed, reuse its ID */
+       flush_work(&mvm->sta_drained_wk);
+
+       mutex_lock(&mvm->mutex);
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
+               ret = iwl_mvm_add_sta(mvm, vif, sta);
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_AUTH) {
+               ret = 0;
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_ASSOC) {
+               ret = iwl_mvm_update_sta(mvm, vif, sta);
+               if (ret == 0)
+                       iwl_mvm_rs_rate_init(mvm, sta,
+                                            mvmvif->phy_ctxt->channel->band);
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTHORIZED) {
+               ret = 0;
+       } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+                  new_state == IEEE80211_STA_ASSOC) {
+               ret = 0;
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTH) {
+               ret = 0;
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_NONE) {
+               ret = 0;
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_NOTEXIST) {
+               ret = iwl_mvm_rm_sta(mvm, vif, sta);
+       } else {
+               ret = -EIO;
+       }
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+/*
+ * mac80211 .set_rts_threshold callback: cache the value; presumably it
+ * is consumed on the Tx path elsewhere (not visible here).
+ */
+static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       mvm->rts_threshold = value;
+
+       return 0;
+}
+
+/*
+ * mac80211 .conf_tx callback: store the per-AC queue parameters. They
+ * are pushed to the firmware later via BSS_CHANGED_QOS, except for a
+ * P2P Device vif which is updated immediately.
+ */
+static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif, u16 ac,
+                              const struct ieee80211_tx_queue_params *params)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mvmvif->queue_params[ac] = *params;
+
+       /*
+        * No need to update right away, we'll get BSS_CHANGED_QOS
+        * The exception is P2P_DEVICE interface which needs immediate update.
+        */
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               int ret;
+
+               mutex_lock(&mvm->mutex);
+               ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+               mutex_unlock(&mvm->mutex);
+               return ret;
+       }
+       return 0;
+}
+
+/*
+ * mac80211 .mgd_prepare_tx callback: before sending auth/assoc frames,
+ * protect the session so we stay on-channel long enough to hear the AP.
+ * Durations are derived from the beacon interval but capped by the
+ * IWL_MVM_TE_SESSION_PROTECTION_{MAX,MIN}_TIME_MS limits.
+ */
+static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
+                          200 + vif->bss_conf.beacon_int);
+       u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
+                              100 + vif->bss_conf.beacon_int);
+
+       /* this callback is only meaningful before association */
+       if (WARN_ON_ONCE(vif->bss_conf.assoc))
+               return;
+
+       mutex_lock(&mvm->mutex);
+       /* Try really hard to protect the session and hear a beacon */
+       iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .set_key callback: program (or remove) a hardware crypto key.
+ * TKIP/CCMP get IV/MMIC generation flags set so mac80211 leaves that
+ * work to the hardware; WEP keys are accepted for Tx only; anything
+ * else is unsupported.
+ */
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+                              enum set_key_cmd cmd,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_sta *sta,
+                              struct ieee80211_key_conf *key)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       /* with software crypto, mac80211 handles everything itself */
+       if (iwlwifi_mod_params.sw_crypto) {
+               IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               /* fall-through */
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               /*
+                * Support for TX only, at least for now, so accept
+                * the key and do nothing else. Then mac80211 will
+                * pass it for TX but we don't have to use it for RX.
+                */
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       mutex_lock(&mvm->mutex);
+
+       switch (cmd) {
+       case SET_KEY:
+               IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
+               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+               if (ret) {
+                       IWL_WARN(mvm, "set key failed\n");
+                       /*
+                        * can't add key for RX, but we don't need it
+                        * in the device for TX so still return 0
+                        */
+                       ret = 0;
+               }
+
+               break;
+       case DISABLE_KEY:
+               IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
+               ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+/*
+ * mac80211 .update_tkip_key callback: thin wrapper forwarding the new
+ * TKIP phase-1 key material to the MVM key code.
+ */
+static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_key_conf *keyconf,
+                                       struct ieee80211_sta *sta,
+                                       u32 iv32, u16 *phase1key)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
+}
+
+
+/*
+ * mac80211 .remain_on_channel callback for P2P Device vifs: move the
+ * dedicated ROC PHY context to the requested channel, then schedule the
+ * time event that keeps the device there for 'duration'.
+ *
+ * Returns 0 on success or a negative error code. Fix vs. original: the
+ * return value of iwl_mvm_phy_ctxt_changed() was assigned to ret and
+ * immediately overwritten by iwl_mvm_start_p2p_roc() without being
+ * checked, so a failed PHY-context move silently went on to schedule
+ * the time event on the wrong channel. Bail out early instead.
+ */
+static int iwl_mvm_roc(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_channel *channel,
+                      int duration)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct cfg80211_chan_def chandef;
+       int ret;
+
+       if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+               IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
+               return -EINVAL;
+       }
+
+       IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value,
+                          duration);
+
+       mutex_lock(&mvm->mutex);
+
+       cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+       ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
+                                      &chandef, 1, 1);
+       if (ret)
+               goto out_unlock;
+
+       /* Schedule the time events */
+       ret = iwl_mvm_start_p2p_roc(mvm, vif, duration);
+
+out_unlock:
+       mutex_unlock(&mvm->mutex);
+       IWL_DEBUG_MAC80211(mvm, "leave\n");
+
+       return ret;
+}
+
+/* mac80211 .cancel_remain_on_channel callback: stop the P2P ROC session. */
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       IWL_DEBUG_MAC80211(mvm, "enter\n");
+
+       mutex_lock(&mvm->mutex);
+       iwl_mvm_stop_p2p_roc(mvm);
+       mutex_unlock(&mvm->mutex);
+
+       IWL_DEBUG_MAC80211(mvm, "leave\n");
+       return 0;
+}
+
+/*
+ * mac80211 .add_chanctx callback: create a firmware PHY context in the
+ * channel context's driver-private area.
+ */
+static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+                              struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+
+       IWL_DEBUG_MAC80211(mvm, "Add PHY context\n");
+       ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def,
+                                  ctx->rx_chains_static,
+                                  ctx->rx_chains_dynamic);
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+/* mac80211 .remove_chanctx callback: tear down the firmware PHY context. */
+static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+                                  struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+       mutex_lock(&mvm->mutex);
+       iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt);
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .change_chanctx callback: push the updated channel definition
+ * and RX chain configuration to the firmware PHY context. The 'changed'
+ * bitmap is ignored; the full context is re-sent unconditionally.
+ */
+static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+                                  struct ieee80211_chanctx_conf *ctx,
+                                  u32 changed)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+       mutex_lock(&mvm->mutex);
+       iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+                                ctx->rx_chains_static,
+                                ctx->rx_chains_dynamic);
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .assign_vif_chanctx callback: attach a vif to a channel
+ * context. AP vifs are handled entirely in the start_ap flow; for
+ * station/adhoc/monitor vifs a firmware binding is created here, and
+ * monitor vifs additionally get their quota set immediately (no
+ * bss_info_changed will follow for them). On failure the binding is
+ * removed and mvmvif->phy_ctxt is reset to NULL.
+ */
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+
+       mvmvif->phy_ctxt = phyctx;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_AP:
+               /*
+                * The AP binding flow is handled as part of the start_ap flow
+                * (in bss_info_changed).
+                */
+               ret = 0;
+               goto out_unlock;
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_MONITOR:
+               break;
+       default:
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       ret = iwl_mvm_binding_add_vif(mvm, vif);
+       if (ret)
+               goto out_unlock;
+
+       /*
+        * Setting the quota at this stage is only required for monitor
+        * interfaces. For the other types, the bss_info changed flow
+        * will handle quota settings.
+        */
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               ret = iwl_mvm_update_quotas(mvm, vif);
+               if (ret)
+                       goto out_remove_binding;
+       }
+
+       goto out_unlock;
+
+ out_remove_binding:
+       iwl_mvm_binding_remove_vif(mvm, vif);
+ out_unlock:
+       mutex_unlock(&mvm->mutex);
+       if (ret)
+               mvmvif->phy_ctxt = NULL;
+       return ret;
+}
+
+/*
+ * mac80211 .unassign_vif_chanctx callback: detach a vif from its channel
+ * context. Pending time events are removed first; AP vifs skip the
+ * binding teardown (handled by stop_ap), and monitor vifs get their
+ * quota recomputed after the binding is removed.
+ */
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mutex_lock(&mvm->mutex);
+
+       iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
+
+       if (vif->type == NL80211_IFTYPE_AP)
+               goto out_unlock;
+
+       iwl_mvm_binding_remove_vif(mvm, vif);
+       switch (vif->type) {
+       case NL80211_IFTYPE_MONITOR:
+               iwl_mvm_update_quotas(mvm, vif);
+               break;
+       default:
+               break;
+       }
+
+out_unlock:
+       mvmvif->phy_ctxt = NULL;
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * mac80211 .set_tim callback: the TIM bitmap changed, so resend the
+ * beacon template to the firmware. The 'set' argument is unused — the
+ * beacon is regenerated either way.
+ */
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
+                          struct ieee80211_sta *sta,
+                          bool set)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+       if (!mvm_sta || !mvm_sta->vif) {
+               IWL_ERR(mvm, "Station is not associated to a vif\n");
+               return -EINVAL;
+       }
+
+       return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
+}
+
+/* mac80211 callback table for the MVM driver (registered at probe time). */
+struct ieee80211_ops iwl_mvm_hw_ops = {
+       .tx = iwl_mvm_mac_tx,
+       .ampdu_action = iwl_mvm_mac_ampdu_action,
+       .start = iwl_mvm_mac_start,
+       .restart_complete = iwl_mvm_mac_restart_complete,
+       .stop = iwl_mvm_mac_stop,
+       .add_interface = iwl_mvm_mac_add_interface,
+       .remove_interface = iwl_mvm_mac_remove_interface,
+       .config = iwl_mvm_mac_config,
+       .configure_filter = iwl_mvm_configure_filter,
+       .bss_info_changed = iwl_mvm_bss_info_changed,
+       .hw_scan = iwl_mvm_mac_hw_scan,
+       .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+       .sta_state = iwl_mvm_mac_sta_state,
+       .sta_notify = iwl_mvm_mac_sta_notify,
+       .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+       .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+       .conf_tx = iwl_mvm_mac_conf_tx,
+       .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+       .set_key = iwl_mvm_mac_set_key,
+       .update_tkip_key = iwl_mvm_mac_update_tkip_key,
+       .remain_on_channel = iwl_mvm_roc,
+       .cancel_remain_on_channel = iwl_mvm_cancel_roc,
+
+       /* channel-context (multi-channel) operations */
+       .add_chanctx = iwl_mvm_add_chanctx,
+       .remove_chanctx = iwl_mvm_remove_chanctx,
+       .change_chanctx = iwl_mvm_change_chanctx,
+       .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
+       .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+
+       .start_ap = iwl_mvm_start_ap,
+       .stop_ap = iwl_mvm_stop_ap,
+
+       .set_tim = iwl_mvm_set_tim,
+
+#ifdef CONFIG_PM_SLEEP
+       /* look at d3.c */
+       .suspend = iwl_mvm_suspend,
+       .resume = iwl_mvm_resume,
+       .set_wakeup = iwl_mvm_set_wakeup,
+       .set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+       .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+       .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
new file mode 100644 (file)
index 0000000..4e339cc
--- /dev/null
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_H__
+#define __IWL_MVM_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <linux/in6.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+#include "iwl-notif-wait.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-test.h"
+#include "iwl-trans.h"
+#include "sta.h"
+#include "fw-api.h"
+
+#define IWL_INVALID_MAC80211_QUEUE     0xff
+#define IWL_MVM_MAX_ADDRESSES          2
+#define IWL_RSSI_OFFSET 44
+
+enum iwl_mvm_tx_fifo {
+       IWL_MVM_TX_FIFO_BK = 0,
+       IWL_MVM_TX_FIFO_BE,
+       IWL_MVM_TX_FIFO_VI,
+       IWL_MVM_TX_FIFO_VO,
+};
+
+/* Placeholder */
+#define IWL_OFFCHANNEL_QUEUE 8
+#define IWL_FIRST_AMPDU_QUEUE 11
+
+extern struct ieee80211_ops iwl_mvm_hw_ops;
+/**
+ * struct iwl_mvm_mod_params - module parameters for iwlmvm
+ * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
+ *     We will register to mac80211 to have testmode working. The NIC must not
+ *     be up'ed after the INIT fw asserted. This is useful to be able to use
+ *     proprietary tools over testmode to debug the INIT fw.
+ * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
+ *     Save)-2(default), LP(Low Power)-3
+ */
+struct iwl_mvm_mod_params {
+       bool init_dbg;
+       int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+       u16 id;
+       u16 color;
+
+       /*
+        * TODO: This should probably be removed. Currently here only for rate
+        * scaling algorithm
+        */
+       struct ieee80211_channel *channel;
+};
+
+struct iwl_mvm_time_event_data {
+       struct ieee80211_vif *vif;
+       struct list_head list;
+       unsigned long end_jiffies;
+       u32 duration;
+       bool running;
+       u32 uid;
+
+       /*
+        * The access to the 'id' field must be done when the
+        * mvm->time_event_lock is held, as its value is used to indicate
+        * if the te is in the time event list or not (when id == TE_MAX)
+        */
+       u32 id;
+};
+
+ /* Power management */
+
+/**
+ * enum iwl_power_scheme
+ * @IWL_POWER_LEVEL_CAM - Continuously Active Mode
+ * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default)
+ * @IWL_POWER_LEVEL_LP  - Low Power
+ */
+enum iwl_power_scheme {
+       IWL_POWER_SCHEME_CAM = 1,
+       IWL_POWER_SCHEME_BPS,
+       IWL_POWER_SCHEME_LP
+};
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL   70
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_active: indicates that ap context is configured, and that the interface
+ *  should get quota etc.
+ * @queue_params: QoS params for this MAC
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ *  vifs: P2P_DEVICE, GO and AP.
+ * @beacon_skb: the skb used to hold the AP/GO beacon template
+ */
+struct iwl_mvm_vif {
+       u16 id;
+       u16 color;
+       u8 ap_sta_id;
+
+       bool uploaded;
+       bool ap_active;
+
+       enum iwl_tsf_id tsf_id;
+
+       /*
+        * QoS data from mac80211, need to store this here
+        * as mac80211 has a separate callback but we need
+        * to have the data for the MAC context
+        */
+       struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+       struct iwl_mvm_time_event_data time_event_data;
+
+       struct iwl_mvm_int_sta bcast_sta;
+
+       /*
+        * Assigned while mac80211 has the interface in a channel context,
+        * or, for P2P Device, while it exists.
+        */
+       struct iwl_mvm_phy_ctxt *phy_ctxt;
+
+#ifdef CONFIG_PM_SLEEP
+       /* WoWLAN GTK rekey data */
+       struct {
+               u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+               __le64 replay_ctr;
+               bool valid;
+       } rekey_data;
+
+       int tx_key_idx;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       /* IPv6 addresses for WoWLAN */
+       struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+       int num_target_ipv6_addrs;
+#endif
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct dentry *dbgfs_dir;
+       void *dbgfs_data;
+#endif
+};
+
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+       return (void *)vif->drv_priv;
+}
+
+enum iwl_mvm_status {
+       IWL_MVM_STATUS_HW_RFKILL,
+       IWL_MVM_STATUS_ROC_RUNNING,
+       IWL_MVM_STATUS_IN_HW_RESTART,
+};
+
+enum iwl_scan_status {
+       IWL_MVM_SCAN_NONE,
+       IWL_MVM_SCAN_OS,
+};
+
+/**
+ * struct iwl_nvm_section - describes an NVM section in memory.
+ *
+ * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
+ * and saved for later use by the driver. Not all NVM sections are saved
+ * this way, only the needed ones.
+ */
+struct iwl_nvm_section {
+       u16 length;
+       const u8 *data;
+};
+
+struct iwl_mvm {
+       /* for logger access */
+       struct device *dev;
+
+       struct iwl_trans *trans;
+       const struct iwl_fw *fw;
+       const struct iwl_cfg *cfg;
+       struct iwl_phy_db *phy_db;
+       struct ieee80211_hw *hw;
+
+       /* for protecting access to iwl_mvm */
+       struct mutex mutex;
+       struct list_head async_handlers_list;
+       spinlock_t async_handlers_lock;
+       struct work_struct async_handlers_wk;
+
+       struct work_struct roc_done_wk;
+
+       unsigned long status;
+
+       enum iwl_ucode_type cur_ucode;
+       bool ucode_loaded;
+       bool init_ucode_run;
+       u32 error_event_table;
+       u32 log_event_table;
+
+       u32 ampdu_ref;
+
+       struct iwl_notif_wait_data notif_wait;
+
+       unsigned long transport_queue_stop;
+       u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+       atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+
+       struct iwl_nvm_data *nvm_data;
+       /* eeprom blob for debugfs/testmode */
+       u8 *eeprom_blob;
+       size_t eeprom_blob_size;
+       /* NVM sections for 7000 family */
+       struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+
+       /* EEPROM MAC addresses */
+       struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
+
+       /* data related to data path */
+       struct iwl_rx_phy_info last_phy_info;
+       struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+       struct work_struct sta_drained_wk;
+       unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+
+       /* configured by mac80211 */
+       u32 rts_threshold;
+
+       /* Scan status, cmd (pre-allocated) and auxiliary station */
+       enum iwl_scan_status scan_status;
+       struct iwl_scan_cmd *scan_cmd;
+
+       /* Internal station */
+       struct iwl_mvm_int_sta aux_sta;
+
+       u8 scan_last_antenna_idx; /* to toggle TX between antennas */
+       u8 mgmt_last_antenna_idx;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct dentry *debugfs_dir;
+       u32 dbgfs_sram_offset, dbgfs_sram_len;
+       bool prevent_power_down_d3;
+#endif
+
+       struct iwl_mvm_phy_ctxt phy_ctxt_roc;
+
+       struct list_head time_event_list;
+       spinlock_t time_event_lock;
+
+       /*
+        * A bitmap indicating the index of the key in use. The firmware
+        * can hold 16 keys at most. Reflect this fact.
+        */
+       unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+       u8 vif_count;
+
+       struct led_classdev led;
+
+       struct ieee80211_vif *p2p_device_vif;
+};
+
+/* Extract MVM priv from op_mode and _hw */
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode)              \
+       ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MVM(_hw)                      \
+       IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+
+extern const u8 iwl_mvm_ac_to_tx_fifo[];
+
+struct iwl_rate_info {
+       u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+       u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+       u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
+       u8 ieee;        /* MAC header:  IWL_RATE_6M_IEEE, etc. */
+};
+
+/******************
+ * MVM Methods
+ ******************/
+/* uCode */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+
+/* Utils */
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+                                       enum ieee80211_band band);
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+u8 first_antenna(u8 mask);
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+
+/* Tx / Host Commands */
+int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
+                                 struct iwl_host_cmd *cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+                                     u32 flags, u16 len, const void *data);
+int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
+                                        struct iwl_host_cmd *cmd,
+                                        u32 *status);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+                                            u16 len, const void *data,
+                                            u32 *status);
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+                  struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status);
+#else
+static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+
+/* Statistics */
+int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                         struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd);
+
+/* NVM */
+int iwl_nvm_init(struct iwl_mvm *mvm);
+
+int iwl_mvm_up(struct iwl_mvm *mvm);
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+
+/*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+ */
+int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                      struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                     struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                       struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                        struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                        struct iwl_device_cmd *cmd);
+
+/* MVM PHY */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+                        struct cfg80211_chan_def *chandef,
+                        u8 chains_static, u8 chains_dynamic);
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+                            struct cfg80211_chan_def *chandef,
+                            u8 chains_static, u8 chains_dynamic);
+void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm,
+                            struct iwl_mvm_phy_ctxt *ctxt);
+
+/* MAC (virtual interface) programming */
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+                               struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif);
+
+/* Bindings */
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+/* Quota management */
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif);
+
+/* Scanning */
+int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+                        struct ieee80211_vif *vif,
+                        struct cfg80211_scan_request *req);
+int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                            struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                            struct iwl_device_cmd *cmd);
+void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+
+/* MVM debugfs */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+int iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                              struct dentry *dbgfs_dir);
+void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                         struct iwl_powertable_cmd *cmd);
+#else
+static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+                                        struct dentry *dbgfs_dir)
+{
+       return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+/* rate scaling */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+                       u8 flags, bool init);
+
+/* power management */
+int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm);
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+
+/* D3 (WoWLAN, NetDetect) */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwl_mvm_resume(struct ieee80211_hw *hw);
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct cfg80211_gtk_rekey_data *data);
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct inet6_dev *idev);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif, int idx);
+
+#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
new file mode 100644 (file)
index 0000000..20016bc
--- /dev/null
@@ -0,0 +1,311 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-nvm-parse.h"
+
+/* list of NVM sections we are allowed/need to read */
+static const int nvm_to_read[] = {
+       NVM_SECTION_TYPE_HW,
+       NVM_SECTION_TYPE_SW,
+       NVM_SECTION_TYPE_CALIBRATION,
+       NVM_SECTION_TYPE_PRODUCTION,
+};
+
+/* used to simplify the shared operations on NVM_ACCESS_CMD versions */
+union iwl_nvm_access_cmd {
+       struct iwl_nvm_access_cmd_ver1 ver1;
+       struct iwl_nvm_access_cmd_ver2 ver2;
+};
+union iwl_nvm_access_resp {
+       struct iwl_nvm_access_resp_ver1 ver1;
+       struct iwl_nvm_access_resp_ver2 ver2;
+};
+
+static inline void iwl_nvm_fill_read_ver1(struct iwl_nvm_access_cmd_ver1 *cmd,
+                                         u16 offset, u16 length)
+{
+       cmd->offset = cpu_to_le16(offset);
+       cmd->length = cpu_to_le16(length);
+       cmd->cache_refresh = 1;
+}
+
+static inline void iwl_nvm_fill_read_ver2(struct iwl_nvm_access_cmd_ver2 *cmd,
+                                         u16 offset, u16 length, u16 section)
+{
+       cmd->offset = cpu_to_le16(offset);
+       cmd->length = cpu_to_le16(length);
+       cmd->type = cpu_to_le16(section);
+}
+
+static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
+                             u16 offset, u16 length, u8 *data)
+{
+       union iwl_nvm_access_cmd nvm_access_cmd;
+       union iwl_nvm_access_resp *nvm_resp;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = NVM_ACCESS_CMD,
+               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .data = { &nvm_access_cmd, },
+       };
+       int ret, bytes_read, offset_read;
+       u8 *resp_data;
+
+       memset(&nvm_access_cmd, 0, sizeof(nvm_access_cmd));
+
+       /* TODO: not sure family should be the decider, maybe FW version? */
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_nvm_fill_read_ver2(&(nvm_access_cmd.ver2),
+                                      offset, length, section);
+               cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver2);
+       } else {
+               iwl_nvm_fill_read_ver1(&(nvm_access_cmd.ver1),
+                                      offset, length);
+               cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver1);
+       }
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret)
+               return ret;
+
+       pkt = cmd.resp_pkt;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
+                       pkt->hdr.flags);
+               ret = -EIO;
+               goto exit;
+       }
+
+       /* Extract NVM response */
+       nvm_resp = (void *)pkt->data;
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               ret = le16_to_cpu(nvm_resp->ver2.status);
+               bytes_read = le16_to_cpu(nvm_resp->ver2.length);
+               offset_read = le16_to_cpu(nvm_resp->ver2.offset);
+               resp_data = nvm_resp->ver2.data;
+       } else {
+               ret = le16_to_cpu(nvm_resp->ver1.length) <= 0;
+               bytes_read = le16_to_cpu(nvm_resp->ver1.length);
+               offset_read = le16_to_cpu(nvm_resp->ver1.offset);
+               resp_data = nvm_resp->ver1.data;
+       }
+       if (ret) {
+               IWL_ERR(mvm,
+                       "NVM access command failed with status %d (device: %s)\n",
+                       ret, mvm->cfg->name);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       if (offset_read != offset) {
+               IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
+                       offset_read);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       /* Write data to NVM */
+       memcpy(data + offset, resp_data, bytes_read);
+       ret = bytes_read;
+
+exit:
+       iwl_free_resp(&cmd);
+       return ret;
+}
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, but just read
+ * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
+ * by uCode, we need to manually check in this case that we don't
+ * overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as we can,
+ * without overflowing, so no check is needed.
+ */
+static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
+                               u8 *data)
+{
+       u16 length, offset = 0;
+       int ret;
+       bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000;
+
+       length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024))
+               - sizeof(union iwl_nvm_access_cmd)
+               - sizeof(struct iwl_rx_packet);
+       /*
+        * if length is greater than EEPROM size, truncate it because uCode
+        * doesn't check it by itself, and exit the loop when reached.
+        */
+       if (old_eeprom && length > mvm->cfg->base_params->eeprom_size)
+               length = mvm->cfg->base_params->eeprom_size;
+       ret = length;
+
+       /* Read the NVM until exhausted (reading less than requested) */
+       while (ret == length) {
+               ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+               if (ret < 0) {
+                       IWL_ERR(mvm,
+                               "Cannot read NVM from section %d offset %d, length %d\n",
+                               section, offset, length);
+                       return ret;
+               }
+               offset += ret;
+               if (old_eeprom && offset == mvm->cfg->base_params->eeprom_size)
+                       break;
+       }
+
+       IWL_INFO(mvm, "NVM section %d read completed\n", section);
+       return offset;
+}
+
+static struct iwl_nvm_data *
+iwl_parse_nvm_sections(struct iwl_mvm *mvm)
+{
+       struct iwl_nvm_section *sections = mvm->nvm_sections;
+       const __le16 *hw, *sw, *calib;
+
+       /* Checking for required sections */
+       if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+           !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
+               IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+               return NULL;
+       }
+
+       if (WARN_ON(!mvm->cfg))
+               return NULL;
+
+       hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+       sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+       calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+       return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib);
+}
+
+int iwl_nvm_init(struct iwl_mvm *mvm)
+{
+       int ret, i, section;
+       u8 *nvm_buffer, *temp;
+
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               /* TODO: find correct NVM max size for a section */
+               nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+                                    GFP_KERNEL);
+               if (!nvm_buffer)
+                       return -ENOMEM;
+               for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+                       section = nvm_to_read[i];
+                       /* we override the constness for initial read */
+                       ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+                       if (ret < 0)
+                               break;
+                       temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+                       if (!temp) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       mvm->nvm_sections[section].data = temp;
+                       mvm->nvm_sections[section].length = ret;
+               }
+               kfree(nvm_buffer);
+               if (ret < 0)
+                       return ret;
+       } else {
+               /* allocate eeprom */
+               mvm->eeprom_blob_size = mvm->cfg->base_params->eeprom_size;
+               IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM size = %zd\n",
+                                mvm->eeprom_blob_size);
+               mvm->eeprom_blob = kzalloc(mvm->eeprom_blob_size, GFP_KERNEL);
+               if (!mvm->eeprom_blob)
+                       return -ENOMEM;
+
+               ret = iwl_nvm_read_section(mvm, 0, mvm->eeprom_blob);
+               if (ret != mvm->eeprom_blob_size) {
+                       IWL_ERR(mvm, "Read partial NVM %d/%zd\n",
+                               ret, mvm->eeprom_blob_size);
+                       kfree(mvm->eeprom_blob);
+                       mvm->eeprom_blob = NULL;
+                       return -EINVAL;
+               }
+       }
+
+       ret = 0;
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+               mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+       else
+               mvm->nvm_data =
+                       iwl_parse_eeprom_data(mvm->trans->dev,
+                                             mvm->cfg,
+                                             mvm->eeprom_blob,
+                                             mvm->eeprom_blob_size);
+
+       if (!mvm->nvm_data) {
+               kfree(mvm->eeprom_blob);
+               mvm->eeprom_blob = NULL;
+               ret = -ENOMEM;
+       }
+
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
new file mode 100644 (file)
index 0000000..aa59adf
--- /dev/null
@@ -0,0 +1,682 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <net/mac80211.h>
+
+#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-fw.h"
+#include "iwl-debug.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-phy-db.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "rs.h"
+#include "fw-api-scan.h"
+#include "time-event.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION        "The new Intel(R) wireless AGN driver for Linux"
+
+#define DRV_VERSION     IWLWIFI_VERSION
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct iwl_op_mode_ops iwl_mvm_ops;
+
+struct iwl_mvm_mod_params iwlmvm_mod_params = {
+       .power_scheme = IWL_POWER_SCHEME_BPS,
+       /* rest of fields are 0 by default */
+};
+
+module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
+MODULE_PARM_DESC(init_dbg,
+                "set to true to debug an ASSERT in INIT fw (default: false)");
+module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
+MODULE_PARM_DESC(power_scheme,
+                "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+
+/*
+ * module init and exit functions
+ */
+static int __init iwl_mvm_init(void)
+{
+       int ret;
+
+       ret = iwl_mvm_rate_control_register();
+       if (ret) {
+               pr_err("Unable to register rate control algorithm: %d\n", ret);
+               return ret;
+       }
+
+       ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+
+       if (ret) {
+               pr_err("Unable to register MVM op_mode: %d\n", ret);
+               iwl_mvm_rate_control_unregister();
+       }
+
+       return ret;
+}
+module_init(iwl_mvm_init);
+
+static void __exit iwl_mvm_exit(void)
+{
+       iwl_opmode_deregister("iwlmvm");
+       iwl_mvm_rate_control_unregister();
+}
+module_exit(iwl_mvm_exit);
+
+static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+       u32 reg_val = 0;
+
+       /*
+        * We can't upload the correct value to the INIT image
+        * as we don't have nvm_data by that time.
+        *
+        * TODO: Figure out what we should do here
+        */
+       if (mvm->nvm_data) {
+               radio_cfg_type = mvm->nvm_data->radio_cfg_type;
+               radio_cfg_step = mvm->nvm_data->radio_cfg_step;
+               radio_cfg_dash = mvm->nvm_data->radio_cfg_dash;
+       } else {
+               radio_cfg_type = 0;
+               radio_cfg_step = 0;
+               radio_cfg_dash = 0;
+       }
+
+       /* SKU control */
+       reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
+                               CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+       reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
+                               CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+       /* radio configuration */
+       reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+       reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+       reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+       WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+                ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+
+       /* silicon bits */
+       reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+       reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
+
+       iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
+                               CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+                               CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+                               CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+                               CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+                               CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+                               CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+                               CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+                               reg_val);
+
+       IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+                      radio_cfg_step, radio_cfg_dash);
+
+       /*
+        * W/A : NIC is stuck in a reset state after Early PCIe power off
+        * (PCIe power is lost before PERST# is asserted), causing ME FW
+        * to lose ownership and not being able to obtain it back.
+        */
+       iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+                              APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+                              ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+struct iwl_rx_handlers {
+       u8 cmd_id;
+       bool async;
+       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                 struct iwl_device_cmd *cmd);
+};
+
+#define RX_HANDLER(_cmd_id, _fn, _async)       \
+       { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+
+/*
+ * Handlers for fw notifications
+ * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
+ * This list should be in order of frequency for performance purposes.
+ *
+ * The handler can be SYNC - this means that it will be called in the Rx path
+ * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
+ * only in this case!), it should be set as ASYNC. In that case, it will be
+ * called from a worker with mvm->mutex held.
+ */
+static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
+       RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
+       RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
+       RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
+       RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
+       RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+
+       RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
+       RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+
+       RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
+       RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
+
+       RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+};
+#undef RX_HANDLER
+#define CMD(x) [x] = #x
+
+static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+       CMD(MVM_ALIVE),
+       CMD(REPLY_ERROR),
+       CMD(INIT_COMPLETE_NOTIF),
+       CMD(PHY_CONTEXT_CMD),
+       CMD(MGMT_MCAST_KEY),
+       CMD(TX_CMD),
+       CMD(TXPATH_FLUSH),
+       CMD(MAC_CONTEXT_CMD),
+       CMD(TIME_EVENT_CMD),
+       CMD(TIME_EVENT_NOTIFICATION),
+       CMD(BINDING_CONTEXT_CMD),
+       CMD(TIME_QUOTA_CMD),
+       CMD(RADIO_VERSION_NOTIFICATION),
+       CMD(SCAN_REQUEST_CMD),
+       CMD(SCAN_ABORT_CMD),
+       CMD(SCAN_START_NOTIFICATION),
+       CMD(SCAN_RESULTS_NOTIFICATION),
+       CMD(SCAN_COMPLETE_NOTIFICATION),
+       CMD(NVM_ACCESS_CMD),
+       CMD(PHY_CONFIGURATION_CMD),
+       CMD(CALIB_RES_NOTIF_PHY_DB),
+       CMD(SET_CALIB_DEFAULT_CMD),
+       CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+       CMD(ADD_STA),
+       CMD(REMOVE_STA),
+       CMD(LQ_CMD),
+       CMD(SCAN_OFFLOAD_CONFIG_CMD),
+       CMD(SCAN_OFFLOAD_REQUEST_CMD),
+       CMD(SCAN_OFFLOAD_ABORT_CMD),
+       CMD(SCAN_OFFLOAD_COMPLETE),
+       CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+       CMD(POWER_TABLE_CMD),
+       CMD(WEP_KEY),
+       CMD(REPLY_RX_PHY_CMD),
+       CMD(REPLY_RX_MPDU_CMD),
+       CMD(BEACON_TEMPLATE_CMD),
+       CMD(STATISTICS_NOTIFICATION),
+       CMD(TX_ANT_CONFIGURATION_CMD),
+       CMD(D3_CONFIG_CMD),
+       CMD(PROT_OFFLOAD_CONFIG_CMD),
+       CMD(OFFLOADS_QUERY_CMD),
+       CMD(REMOTE_WAKE_CONFIG_CMD),
+       CMD(WOWLAN_PATTERNS),
+       CMD(WOWLAN_CONFIGURATION),
+       CMD(WOWLAN_TSC_RSC_PARAM),
+       CMD(WOWLAN_TKIP_PARAM),
+       CMD(WOWLAN_KEK_KCK_MATERIAL),
+       CMD(WOWLAN_GET_STATUSES),
+       CMD(WOWLAN_TX_POWER_PER_DB),
+       CMD(NET_DETECT_CONFIG_CMD),
+       CMD(NET_DETECT_PROFILES_QUERY_CMD),
+       CMD(NET_DETECT_PROFILES_CMD),
+       CMD(NET_DETECT_HOTSPOTS_CMD),
+       CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+};
+#undef CMD
+
+/* this forward declaration can avoid to export the function */
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+
+static struct iwl_op_mode *
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                     const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+       struct ieee80211_hw *hw;
+       struct iwl_op_mode *op_mode;
+       struct iwl_mvm *mvm;
+       struct iwl_trans_config trans_cfg = {};
+       static const u8 no_reclaim_cmds[] = {
+               TX_CMD,
+       };
+       int err, scan_size;
+
+       switch (cfg->device_family) {
+       case IWL_DEVICE_FAMILY_6030:
+       case IWL_DEVICE_FAMILY_6005:
+       case IWL_DEVICE_FAMILY_7000:
+               break;
+       default:
+               IWL_ERR(trans, "Trying to load mvm on an unsupported device\n");
+               return NULL;
+       }
+
+       /********************************
+        * 1. Allocating and configuring HW data
+        ********************************/
+       hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+                               sizeof(struct iwl_mvm),
+                               &iwl_mvm_hw_ops);
+       if (!hw)
+               return NULL;
+
+       op_mode = hw->priv;
+       op_mode->ops = &iwl_mvm_ops;
+       op_mode->trans = trans;
+
+       mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       mvm->dev = trans->dev;
+       mvm->trans = trans;
+       mvm->cfg = cfg;
+       mvm->fw = fw;
+       mvm->hw = hw;
+
+       mutex_init(&mvm->mutex);
+       spin_lock_init(&mvm->async_handlers_lock);
+       INIT_LIST_HEAD(&mvm->time_event_list);
+       INIT_LIST_HEAD(&mvm->async_handlers_list);
+       spin_lock_init(&mvm->time_event_lock);
+
+       INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
+       INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+       INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+
+       SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+
+       /*
+        * Populate the state variables that the transport layer needs
+        * to know about.
+        */
+       trans_cfg.op_mode = op_mode;
+       trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+       trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+       trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+
+       /* TODO: this should really be a TLV */
+       if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
+               trans_cfg.bc_table_dword = true;
+
+       if (!iwlwifi_mod_params.wd_disable)
+               trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
+       else
+               trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
+
+       trans_cfg.command_names = iwl_mvm_cmd_strings;
+
+       trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+       trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO;
+
+       snprintf(mvm->hw->wiphy->fw_version,
+                sizeof(mvm->hw->wiphy->fw_version),
+                "%s", fw->fw_version);
+
+       /* Configure transport layer */
+       iwl_trans_configure(mvm->trans, &trans_cfg);
+
+       trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+       trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
+       /* set up notification wait support */
+       iwl_notification_wait_init(&mvm->notif_wait);
+
+       /* Init phy db */
+       mvm->phy_db = iwl_phy_db_init(trans);
+       if (!mvm->phy_db) {
+               IWL_ERR(mvm, "Cannot init phy_db\n");
+               goto out_free;
+       }
+
+       IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
+                mvm->cfg->name, mvm->trans->hw_rev);
+
+       err = iwl_trans_start_hw(mvm->trans);
+       if (err)
+               goto out_free;
+
+       mutex_lock(&mvm->mutex);
+       err = iwl_run_init_mvm_ucode(mvm, true);
+       mutex_unlock(&mvm->mutex);
+       if (err && !iwlmvm_mod_params.init_dbg) {
+               IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+               goto out_free;
+       }
+
+       /* Stop the hw after the ALIVE and NVM has been read */
+       if (!iwlmvm_mod_params.init_dbg)
+               iwl_trans_stop_hw(mvm->trans, false);
+
+       scan_size = sizeof(struct iwl_scan_cmd) +
+               mvm->fw->ucode_capa.max_probe_length +
+               (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel));
+       mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+       if (!mvm->scan_cmd)
+               goto out_free;
+
+       err = iwl_mvm_mac_setup_register(mvm);
+       if (err)
+               goto out_free;
+
+       err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+       if (err)
+               goto out_unregister;
+
+       return op_mode;
+
+ out_unregister:
+       ieee80211_unregister_hw(mvm->hw);
+ out_free:
+       iwl_phy_db_free(mvm->phy_db);
+       kfree(mvm->scan_cmd);
+       kfree(mvm->eeprom_blob);
+       iwl_trans_stop_hw(trans, true);
+       ieee80211_free_hw(mvm->hw);
+       return NULL;
+}
+
+static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       int i;
+
+       iwl_mvm_leds_exit(mvm);
+
+       ieee80211_unregister_hw(mvm->hw);
+
+       kfree(mvm->scan_cmd);
+
+       iwl_trans_stop_hw(mvm->trans, true);
+
+       iwl_phy_db_free(mvm->phy_db);
+       mvm->phy_db = NULL;
+
+       kfree(mvm->eeprom_blob);
+       iwl_free_nvm_data(mvm->nvm_data);
+       for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+               kfree(mvm->nvm_sections[i].data);
+
+       ieee80211_free_hw(mvm->hw);
+}
+
+struct iwl_async_handler_entry {
+       struct list_head list;
+       struct iwl_rx_cmd_buffer rxb;
+       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                 struct iwl_device_cmd *cmd);
+};
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
+{
+       struct iwl_async_handler_entry *entry, *tmp;
+
+       spin_lock_bh(&mvm->async_handlers_lock);
+       list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+               iwl_free_rxb(&entry->rxb);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+       spin_unlock_bh(&mvm->async_handlers_lock);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm =
+               container_of(wk, struct iwl_mvm, async_handlers_wk);
+       struct iwl_async_handler_entry *entry, *tmp;
+       struct list_head local_list;
+
+       INIT_LIST_HEAD(&local_list);
+
+       /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
+       mutex_lock(&mvm->mutex);
+
+       /*
+        * Sync with Rx path with a lock. Remove all the entries from this list,
+        * add them to a local one (lock free), and then handle them.
+        */
+       spin_lock_bh(&mvm->async_handlers_lock);
+       list_splice_init(&mvm->async_handlers_list, &local_list);
+       spin_unlock_bh(&mvm->async_handlers_lock);
+
+       list_for_each_entry_safe(entry, tmp, &local_list, list) {
+               if (entry->fn(mvm, &entry->rxb, NULL))
+                       IWL_WARN(mvm,
+                                "returned value from ASYNC handlers are ignored\n");
+               iwl_free_rxb(&entry->rxb);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+       mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+                              struct iwl_rx_cmd_buffer *rxb,
+                              struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       u8 i;
+
+       /*
+        * Do the notification wait before RX handlers so
+        * even if the RX handler consumes the RXB we have
+        * access to it in the notification wait entry.
+        */
+       iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+
+       for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+               const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
+               struct iwl_async_handler_entry *entry;
+
+               if (rx_h->cmd_id != pkt->hdr.cmd)
+                       continue;
+
+               if (!rx_h->async)
+                       return rx_h->fn(mvm, rxb, cmd);
+
+               entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+               /* we can't do much... */
+               if (!entry)
+                       return 0;
+
+               entry->rxb._page = rxb_steal_page(rxb);
+               entry->rxb._offset = rxb->_offset;
+               entry->rxb._rx_page_order = rxb->_rx_page_order;
+               entry->fn = rx_h->fn;
+               spin_lock(&mvm->async_handlers_lock);
+               list_add_tail(&entry->list, &mvm->async_handlers_list);
+               spin_unlock(&mvm->async_handlers_lock);
+               schedule_work(&mvm->async_handlers_wk);
+               break;
+       }
+
+       return 0;
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       int mq = mvm->queue_to_mac80211[queue];
+
+       if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+               return;
+
+       if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) {
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "queue %d (mac80211 %d) already stopped\n",
+                                   queue, mq);
+               return;
+       }
+
+       set_bit(mq, &mvm->transport_queue_stop);
+       ieee80211_stop_queue(mvm->hw, mq);
+}
+
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       int mq = mvm->queue_to_mac80211[queue];
+
+       if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+               return;
+
+       if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) {
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "queue %d (mac80211 %d) already awake\n",
+                                   queue, mq);
+               return;
+       }
+
+       clear_bit(mq, &mvm->transport_queue_stop);
+
+       ieee80211_wake_queue(mvm->hw, mq);
+}
+
+static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       if (state)
+               set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+       else
+               clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+
+       wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
+static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       struct ieee80211_tx_info *info;
+
+       info = IEEE80211_SKB_CB(skb);
+       iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+       ieee80211_free_txskb(mvm->hw, skb);
+}
+
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       iwl_mvm_dump_nic_error_log(mvm);
+
+       iwl_abort_notification_waits(&mvm->notif_wait);
+
+       /*
+        * If we're restarting already, don't cycle restarts.
+        * If INIT fw asserted, it will likely fail again.
+        * If WoWLAN fw asserted, don't restart either, mac80211
+        * can't recover this since we're already half suspended.
+        */
+       if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
+       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+                  iwlwifi_mod_params.restart_fw) {
+               /*
+                * This is a bit racy, but worst case we tell mac80211 about
+                * a stopped/aborted (sched) scan when that was already done
+                * which is not a problem. It is necessary to abort any scan
+                * here because mac80211 requires having the scan cleared
+                * before restarting.
+                * We'll reset the scan_status to NONE in restart cleanup in
+                * the next start() call from mac80211.
+                */
+               switch (mvm->scan_status) {
+               case IWL_MVM_SCAN_NONE:
+                       break;
+               case IWL_MVM_SCAN_OS:
+                       ieee80211_scan_completed(mvm->hw, true);
+                       break;
+               }
+
+               ieee80211_restart_hw(mvm->hw);
+       }
+}
+
+static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+       WARN_ON(1);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops = {
+       .start = iwl_op_mode_mvm_start,
+       .stop = iwl_op_mode_mvm_stop,
+       .rx = iwl_mvm_rx_dispatch,
+       .queue_full = iwl_mvm_stop_sw_queue,
+       .queue_not_full = iwl_mvm_wake_sw_queue,
+       .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
+       .free_skb = iwl_mvm_free_skb,
+       .nic_error = iwl_mvm_nic_error,
+       .cmd_queue_full = iwl_mvm_cmd_queue_full,
+       .nic_config = iwl_mvm_nic_config,
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
new file mode 100644 (file)
index 0000000..b428448
--- /dev/null
@@ -0,0 +1,292 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+{
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+               return PHY_VHT_CHANNEL_MODE20;
+       case NL80211_CHAN_WIDTH_40:
+               return PHY_VHT_CHANNEL_MODE40;
+       case NL80211_CHAN_WIDTH_80:
+               return PHY_VHT_CHANNEL_MODE80;
+       case NL80211_CHAN_WIDTH_160:
+               return PHY_VHT_CHANNEL_MODE160;
+       default:
+               WARN(1, "Invalid channel width=%u", chandef->width);
+               return PHY_VHT_CHANNEL_MODE20;
+       }
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+{
+       switch (chandef->chan->center_freq - chandef->center_freq1) {
+       case -70:
+               return PHY_VHT_CTRL_POS_4_BELOW;
+       case -50:
+               return PHY_VHT_CTRL_POS_3_BELOW;
+       case -30:
+               return PHY_VHT_CTRL_POS_2_BELOW;
+       case -10:
+               return PHY_VHT_CTRL_POS_1_BELOW;
+       case  10:
+               return PHY_VHT_CTRL_POS_1_ABOVE;
+       case  30:
+               return PHY_VHT_CTRL_POS_2_ABOVE;
+       case  50:
+               return PHY_VHT_CTRL_POS_3_ABOVE;
+       case  70:
+               return PHY_VHT_CTRL_POS_4_ABOVE;
+       default:
+               WARN(1, "Invalid channel definition");
+       case 0:
+               /*
+                * The FW is expected to check the control channel position only
+                * when in HT/VHT and the channel width is not 20MHz. Return
+                * this value as the default one.
+                */
+               return PHY_VHT_CTRL_POS_1_BELOW;
+       }
+}
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+                                    struct iwl_phy_context_cmd *cmd,
+                                    u32 action, u32 apply_time)
+{
+       memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+
+       cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+                                                           ctxt->color));
+       cmd->action = cpu_to_le32(action);
+       cmd->apply_time = cpu_to_le32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+                                     struct iwl_phy_context_cmd *cmd,
+                                     struct cfg80211_chan_def *chandef,
+                                     u8 chains_static, u8 chains_dynamic)
+{
+       u8 valid_rx_chains, active_cnt, idle_cnt;
+
+       /* Set the channel info data */
+       cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+             PHY_BAND_24 : PHY_BAND_5);
+
+       cmd->ci.channel = chandef->chan->hw_value;
+       cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+       cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+
+       /* Set rx the chains */
+
+       /* TODO:
+        * Need to add on chain noise calibration limitations, and
+        * BT coex considerations.
+        */
+       valid_rx_chains = mvm->nvm_data->valid_rx_ant;
+       idle_cnt = chains_static;
+       active_cnt = chains_dynamic;
+
+       cmd->rxchain_info = cpu_to_le32(valid_rx_chains <<
+                                       PHY_RX_CHAIN_VALID_POS);
+       cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+       cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+                                        PHY_RX_CHAIN_MIMO_CNT_POS);
+
+       cmd->txchain_info = cpu_to_le32(mvm->nvm_data->valid_tx_ant);
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only if something in the configuration changed: in case that this is the
+ * first time that the phy configuration is applied or in case that the phy
+ * configuration changed from the previous apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_phy_ctxt *ctxt,
+                                 struct cfg80211_chan_def *chandef,
+                                 u8 chains_static, u8 chains_dynamic,
+                                 u32 action, u32 apply_time)
+{
+       struct iwl_phy_context_cmd cmd;
+       int ret;
+
+       /* Set the command header fields */
+       iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+
+       /* Set the command data */
+       iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+                                 chains_static, chains_dynamic);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+                                  sizeof(struct iwl_phy_context_cmd),
+                                  &cmd);
+       if (ret)
+               IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+       return ret;
+}
+
+
+/* bitmap of PHY context IDs currently in use, filled by the iterator below */
+struct phy_ctx_used_data {
+       unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)];
+};
+
+/*
+ * mac80211 channel-context iterator callback: mark the PHY context ID
+ * backing @ctx as used in the phy_ctx_used_data bitmap.
+ */
+static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw,
+                                     struct ieee80211_chanctx_conf *ctx,
+                                     void *_data)
+{
+       struct phy_ctx_used_data *data = _data;
+       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+       __set_bit(phy_ctxt->id, data->used);
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ *
+ * Allocates a free context ID (unless recovering from a HW restart, in which
+ * case the previous ID in @ctxt is reused) and issues FW_CTXT_ACTION_ADD.
+ * Returns 0 on success, -EIO if no free ID is available, or the error from
+ * iwl_mvm_phy_ctxt_apply().
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+                        struct cfg80211_chan_def *chandef,
+                        u8 chains_static, u8 chains_dynamic)
+{
+       struct phy_ctx_used_data data = {
+               .used = { },
+       };
+
+       /*
+        * If this is a regular PHY context (not the ROC one)
+        * skip the ROC PHY context's ID.
+        */
+       if (ctxt != &mvm->phy_ctxt_roc)
+               __set_bit(mvm->phy_ctxt_roc.id, data.used);
+
+       lockdep_assert_held(&mvm->mutex);
+       /* bump the color so the FW can tell this is a new incarnation */
+       ctxt->color++;
+
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               /* collect IDs already taken by active channel contexts */
+               ieee80211_iter_chan_contexts_atomic(
+                       mvm->hw, iwl_mvm_phy_ctx_used_iter, &data);
+
+               ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX);
+               if (WARN_ONCE(ctxt->id == NUM_PHY_CTX,
+                             "Failed to init PHY context - no free ID!\n"))
+                       return -EIO;
+       }
+
+       ctxt->channel = chandef->chan;
+       return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+                                     chains_static, chains_dynamic,
+                                     FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ *
+ * Must be called with mvm->mutex held. Returns the result of
+ * iwl_mvm_phy_ctxt_apply() with FW_CTXT_ACTION_MODIFY.
+ */
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+                            struct cfg80211_chan_def *chandef,
+                            u8 chains_static, u8 chains_dynamic)
+{
+       lockdep_assert_held(&mvm->mutex);
+
+       ctxt->channel = chandef->chan;
+       return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+                                     chains_static, chains_dynamic,
+                                     FW_CTXT_ACTION_MODIFY, 0);
+}
+
+/*
+ * Send a command to the FW to remove the given phy context.
+ * Once the command is sent, regardless of success or failure, the context is
+ * marked as invalid
+ *
+ * NOTE(review): no "mark invalid" step is visible in this function; if that
+ * is done by the caller (or not at all), the comment above should be fixed.
+ * Send errors are logged but not propagated (void return).
+ */
+void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+       struct iwl_phy_context_cmd cmd;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* header only: REMOVE needs no channel/chain payload beyond id+color */
+       iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0);
+       ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+                                  sizeof(struct iwl_phy_context_cmd),
+                                  &cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n",
+                       ctxt->id);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
new file mode 100644 (file)
index 0000000..5a92a49
--- /dev/null
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC    25
+
+/*
+ * Build a POWER_TABLE_CMD for @vif: fill id/color and MODIFY action, then,
+ * if power save is applicable (BSS PS enabled and not in CAM scheme), enable
+ * power management and derive keep-alive, skip-over-DTIM and RX/TX data
+ * timeouts from the DTIM period and the configured power scheme.
+ */
+static void iwl_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               struct iwl_powertable_cmd *cmd)
+{
+       struct ieee80211_hw *hw = mvm->hw;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_channel *chan;
+       int dtimper, dtimper_msec;
+       int keep_alive;
+       bool radar_detect = false;
+
+       cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                           mvmvif->color));
+       cmd->action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
+
+       /* leave PM disabled when BSS PS is off or the scheme is CAM */
+       if ((!vif->bss_conf.ps) ||
+           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM))
+               return;
+
+       cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+       /* fall back to DTIM period 1 if mac80211 hasn't set one yet */
+       dtimper = hw->conf.ps_dtim_period ?: 1;
+
+       /* Check if radar detection is required on current channel */
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       WARN_ON(!chanctx_conf);
+       if (chanctx_conf) {
+               chan = chanctx_conf->def.chan;
+               radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+       }
+       rcu_read_unlock();
+
+       /* Check skip over DTIM conditions */
+       if (!radar_detect && (dtimper <= 10) &&
+           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_SLEEP_OVER_DTIM_MSK);
+               cmd->num_skip_dtim = 2;
+       }
+
+       /* Check that keep alive period is at least 3 * DTIM */
+       dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+       keep_alive = max_t(int, 3 * dtimper_msec,
+                          MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
+       /* convert msec -> whole seconds, rounding up */
+       keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+
+       cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP) {
+               /* TODO: Also for D3 (device sleep / WoWLAN) */
+               cmd->rx_data_timeout = cpu_to_le32(10);
+               cmd->tx_data_timeout = cpu_to_le32(10);
+       } else {
+               cmd->rx_data_timeout = cpu_to_le32(50);
+               cmd->tx_data_timeout = cpu_to_le32(50);
+       }
+}
+
+/*
+ * Build and send the power table command for @vif (station, non-P2P only).
+ * Returns 0 without sending when power save is disabled by module param or
+ * the interface type is not applicable; otherwise returns the send result.
+ *
+ * NOTE(review): the debug prints below pass cmd.id_and_color (__le32) to a
+ * 0x%X format without le32_to_cpu() — harmless on LE hosts, wrong on BE.
+ */
+int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_powertable_cmd cmd = {};
+
+       if (!iwlwifi_mod_params.power_save) {
+               IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
+               return 0;
+       }
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return 0;
+
+       iwl_power_build_cmd(mvm, vif, &cmd);
+
+       IWL_DEBUG_POWER(mvm,
+                       "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+                       cmd.id_and_color, iwlmvm_mod_params.power_scheme,
+                       le16_to_cpu(cmd.flags));
+
+       if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+               IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+                               le16_to_cpu(cmd.keep_alive_seconds));
+               IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+                               le32_to_cpu(cmd.rx_data_timeout));
+               IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+                               le32_to_cpu(cmd.tx_data_timeout));
+               IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+                               le32_to_cpu(cmd.rx_data_timeout_uapsd));
+               /* NOTE(review): label below looks copy-pasted — should
+                * presumably read "Tx timeout (uAPSD)" to match the field. */
+               IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+                               le32_to_cpu(cmd.tx_data_timeout_uapsd));
+               IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+                               cmd.lprx_rssi_threshold);
+               IWL_DEBUG_POWER(mvm, "DTIMs to skip = %u\n", cmd.num_skip_dtim);
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+                                   sizeof(cmd), &cmd);
+}
+
+/*
+ * Disable power management for @vif by sending a power table command with
+ * the PM-enable flag left clear (cmd is zero-initialized, only id/color and
+ * MODIFY action are set). Sent asynchronously, unlike the update path.
+ * Returns 0 early when power save is globally off or the vif is not a
+ * non-P2P station.
+ */
+int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_powertable_cmd cmd = {};
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (!iwlwifi_mod_params.power_save) {
+               IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
+               return 0;
+       }
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return 0;
+
+       cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                           mvmvif->color));
+       cmd.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
+
+       IWL_DEBUG_POWER(mvm,
+                       "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+                       cmd.id_and_color, iwlmvm_mod_params.power_scheme,
+                       le16_to_cpu(cmd.flags));
+
+       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+                                   sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* debugfs helper: expose the power command that would be built for @vif */
+void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                         struct iwl_powertable_cmd *cmd)
+{
+       iwl_power_build_cmd(mvm, vif, cmd);
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
new file mode 100644 (file)
index 0000000..9256284
--- /dev/null
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* per-binding accumulation state for the active-interface iteration */
+struct iwl_mvm_quota_iterator_data {
+       int n_interfaces[MAX_BINDINGS]; /* # of quota-relevant vifs per binding */
+       int colors[MAX_BINDINGS];       /* PHY color per binding, -1 = unset */
+       struct ieee80211_vif *new_vif;  /* vif being added, counted separately */
+};
+
+/*
+ * Active-interface iterator: for each vif that has a PHY context, record the
+ * binding's color and count the vif toward that binding's quota if its
+ * interface type/state requires airtime (assoc'd STA, active AP, joined
+ * IBSS, monitor). P2P device never gets quota.
+ */
+static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
+                                  struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_quota_iterator_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       u16 id;
+
+       /*
+        * We'll account for the new interface (if any) below,
+        * skip it here in case we're not called from within
+        * the add_interface callback (otherwise it won't show
+        * up in iteration)
+        */
+       if (vif == data->new_vif)
+               return;
+
+       if (!mvmvif->phy_ctxt)
+               return;
+
+       /* currently, PHY ID == binding ID */
+       id = mvmvif->phy_ctxt->id;
+
+       /* need at least one binding per PHY */
+       BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+
+       if (WARN_ON_ONCE(id >= MAX_BINDINGS))
+               return;
+
+       /* all vifs on one binding must share the PHY context color */
+       if (data->colors[id] < 0)
+               data->colors[id] = mvmvif->phy_ctxt->color;
+       else
+               WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               if (vif->bss_conf.assoc)
+                       data->n_interfaces[id]++;
+               break;
+       case NL80211_IFTYPE_AP:
+               if (mvmvif->ap_active)
+                       data->n_interfaces[id]++;
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               data->n_interfaces[id]++;
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               if (vif->bss_conf.ibss_joined)
+                       data->n_interfaces[id]++;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+}
+
+/*
+ * Recompute and send the TIME_QUOTA_CMD: divide the FW scheduling session
+ * (IWL_MVM_MAX_QUOTA fragments) evenly among all bindings that have at least
+ * one quota-relevant interface. @newvif, if non-NULL, is an interface being
+ * added that may not yet show up in the mac80211 iteration.
+ *
+ * Must be called with mvm->mutex held. Returns the send result (also logged
+ * on failure). Skipped (returns 0) while recovering from a HW restart.
+ */
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
+{
+       struct iwl_time_quota_cmd cmd;
+       int i, idx, ret, num_active_bindings, quota, quota_rem;
+       struct iwl_mvm_quota_iterator_data data = {
+               .n_interfaces = {},
+               .colors = { -1, -1, -1, -1 },
+               .new_vif = newvif,
+       };
+
+       /* update all upon completion */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               return 0;
+
+       /* catch MAX_BINDINGS growing past the 4-entry initializer above */
+       BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_quota_iterator, &data);
+       if (newvif) {
+               /* count the new vif explicitly (skipped inside the iterator) */
+               data.new_vif = NULL;
+               iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
+       }
+
+       /*
+        * The FW's scheduling session consists of
+        * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+        * equally between all the bindings that require quota
+        */
+       num_active_bindings = 0;
+       for (i = 0; i < MAX_BINDINGS; i++) {
+               cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+               if (data.n_interfaces[i] > 0)
+                       num_active_bindings++;
+       }
+
+       /* no active binding: still send the (all-invalid) command */
+       if (!num_active_bindings)
+               goto send_cmd;
+
+       quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
+       quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+
+       /* pack active bindings densely at the front of cmd.quotas[] */
+       for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+               if (data.n_interfaces[i] <= 0)
+                       continue;
+
+               cmd.quotas[idx].id_and_color =
+                       cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+               cmd.quotas[idx].quota = cpu_to_le32(quota);
+               cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+               idx++;
+       }
+
+       /* Give the remainder of the session to the first binding */
+       le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+
+send_cmd:
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+                                  sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
new file mode 100644 (file)
index 0000000..56b636d
--- /dev/null
@@ -0,0 +1,3080 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+#define RS_NAME "iwl-mvm-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY      1
+#define IWL_HT_NUMBER_TRY   3
+
+#define IWL_RATE_MAX_WINDOW            62      /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH                6       /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH                8       /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX            15
+/* max time to accum history 2 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
+/* map an HT rate index to the legacy (OFDM) rate to fall back to */
+static u8 rs_ht_to_legacy[] = {
+       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+       IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+       IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+       IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+/* next antenna configuration to try, indexed by current ANT_* mask */
+static const u8 ant_toggle_lookup[] = {
+       /*ANT_NONE -> */ ANT_NONE,
+       /*ANT_A    -> */ ANT_B,
+       /*ANT_B    -> */ ANT_C,
+       /*ANT_AB   -> */ ANT_BC,
+       /*ANT_C    -> */ ANT_A,
+       /*ANT_AC   -> */ ANT_AB,
+       /*ANT_BC   -> */ ANT_AC,
+       /*ANT_ABC  -> */ ANT_ABC,
+};
+
+/*
+ * Build one iwl_rs_rate_info entry: PLCP codes (legacy/SISO/MIMO2/MIMO3),
+ * the IEEE rate value, and the prev/next rate indices for normal and
+ * antenna-toggle rate-scaling transitions.
+ */
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
+                                   IWL_RATE_SISO_##s##M_PLCP, \
+                                   IWL_RATE_MIMO2_##s##M_PLCP,\
+                                   IWL_RATE_MIMO3_##s##M_PLCP,\
+                                   IWL_RATE_##r##M_IEEE,      \
+                                   IWL_RATE_##ip##M_INDEX,    \
+                                   IWL_RATE_##in##M_INDEX,    \
+                                   IWL_RATE_##rp##M_INDEX,    \
+                                   IWL_RATE_##rn##M_INDEX,    \
+                                   IWL_RATE_##pp##M_INDEX,    \
+                                   IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
+       IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
+       IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
+       IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
+       IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
+       IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
+       IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
+       IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
+       IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
+       IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
+       IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
+       IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
+       IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+       IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+       /* FIXME:RS:          ^^    should be INV (legacy) */
+};
+
+/* Extract the legacy rate / MCS index bits from a rate_n_flags word. */
+static inline u8 rs_extract_rate(u32 rate_n_flags)
+{
+       /* also works for HT because bits 7:6 are zero there */
+       return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
+}
+
+/*
+ * Convert a HW rate_n_flags value to an index into iwl_rates[].
+ * HT rates are normalized to the equivalent SISO OFDM index (9M, which has
+ * no HT counterpart, is skipped); legacy rates are matched by PLCP code.
+ * Returns -1 if no mapping exists.
+ */
+static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+       int idx = 0;
+
+       /* HT rate format */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = rs_extract_rate(rate_n_flags);
+
+               /* fold MIMO3/MIMO2 PLCP ranges down to the SISO range */
+               if (idx >= IWL_RATE_MIMO3_6M_PLCP)
+                       idx = idx - IWL_RATE_MIMO3_6M_PLCP;
+               else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+                       idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+               idx += IWL_FIRST_OFDM_RATE;
+               /* skip 9M not supported in ht*/
+               if (idx >= IWL_RATE_9M_INDEX)
+                       idx += 1;
+               if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+                       return idx;
+
+       /* legacy rate format, search for match in table */
+       } else {
+               for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
+                       if (iwl_rates[idx].plcp ==
+                                       rs_extract_rate(rate_n_flags))
+                               return idx;
+       }
+
+       return -1;
+}
+
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+                                  struct sk_buff *skb,
+                                  struct ieee80211_sta *sta,
+                                  struct iwl_lq_sta *lq_sta);
+static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+                            struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index);
+#else
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *     1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ *
+ * For the HT tables the four rows are, in order:
+ *   [0] normal GI, no aggregation; [1] short GI; [2] aggregation;
+ *   [3] aggregation + short GI.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
+       {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
+       {0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
+       {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+       {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+       {0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+       {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+       {0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+       {0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+       {0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+       {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+       {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+       {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
+       {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
+       {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
+       {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 152, 0, 211, 239, 255, 279,  290,  294,  297}, /* Norm */
+       {0, 0, 0, 0, 160, 0, 219, 245, 261, 284,  294,  297,  300}, /* SGI */
+       {0, 0, 0, 0, 254, 0, 443, 584, 695, 868,  984, 1030, 1070}, /* AGG */
+       {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+       {  "1", "BPSK DSSS"},
+       {  "2", "QPSK DSSS"},
+       {"5.5", "BPSK CCK"},
+       { "11", "QPSK CCK"},
+       {  "6", "BPSK 1/2"},
+       {  "9", "BPSK 1/2"},
+       { "12", "QPSK 1/2"},
+       { "18", "QPSK 3/4"},
+       { "24", "16QAM 1/2"},
+       { "36", "16QAM 3/4"},
+       { "48", "64QAM 2/3"},
+       { "54", "64QAM 3/4"},
+       { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM   (8)
+
+/* Reset one rate's success-history window to its initial (invalid) state. */
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+       window->data = 0;
+       window->success_counter = 0;
+       window->success_ratio = IWL_INVALID_VALUE;
+       window->counter = 0;
+       window->average_tpt = IWL_INVALID_VALUE;
+       window->stamp = 0;
+}
+
+/* True iff every antenna in @ant_type is present in @valid_antenna's mask. */
+static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+       return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ *     removes the old data from the statistics. All data that is older than
+ *     TID_MAX_TIME_DIFF, will be deleted.
+ *
+ *     The traffic load is a circular buffer of per-cell packet counts;
+ *     expired head cells are zeroed and the window is advanced one
+ *     TID_QUEUE_CELL_SPACING step at a time.
+ */
+static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+       /* The oldest age we want to keep */
+       u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+       while (tl->queue_count &&
+              (tl->time_stamp < oldest_time)) {
+               tl->total -= tl->packet_count[tl->head];
+               tl->packet_count[tl->head] = 0;
+               tl->time_stamp += TID_QUEUE_CELL_SPACING;
+               tl->queue_count--;
+               tl->head++;
+               /* wrap the circular buffer */
+               if (tl->head >= TID_QUEUE_MAX_SIZE)
+                       tl->head = 0;
+       }
+}
+
+/*
+ *     increment traffic load value for tid and also remove
+ *     any old values if passed the certain time period
+ *
+ *     Returns the frame's TID (0..IWL_MAX_TID_COUNT-1) when the packet was
+ *     accounted, or IWL_MAX_TID_COUNT for non-QoS data, invalid TIDs, and
+ *     the very first packet of a window (which only initializes the state).
+ */
+static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+                          struct ieee80211_hdr *hdr)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 index;
+       struct iwl_traffic_load *tl = NULL;
+       u8 tid;
+
+       /* only QoS data frames carry a TID */
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       } else {
+               return IWL_MAX_TID_COUNT;
+       }
+
+       if (unlikely(tid >= IWL_MAX_TID_COUNT))
+               return IWL_MAX_TID_COUNT;
+
+       tl = &lq_data->load[tid];
+
+       /* quantize the timestamp to TID_ROUND_VALUE granularity */
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       /* Happens only for the first packet. Initialize the data */
+       if (!(tl->queue_count)) {
+               tl->total = 1;
+               tl->time_stamp = curr_time;
+               tl->queue_count = 1;
+               tl->head = 0;
+               tl->packet_count[0] = 1;
+               return IWL_MAX_TID_COUNT;
+       }
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       index = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data that is older than */
+       /* TID_MAX_TIME_DIFF */
+       if (index >= TID_QUEUE_MAX_SIZE)
+               rs_tl_rm_old_stats(tl, curr_time);
+
+       /* translate the cell offset into the circular buffer */
+       index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+       tl->packet_count[index] = tl->packet_count[index] + 1;
+       tl->total = tl->total + 1;
+
+       if ((index + 1) > tl->queue_count)
+               tl->queue_count = index + 1;
+
+       return tid;
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/*
+ * Program the device to use a fixed rate for frame transmit.
+ * This is for debugging/testing only.
+ * Once the device starts using a fixed rate, we need to reload the
+ * module to bring back normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+                               struct iwl_lq_sta *lq_sta)
+{
+       /* re-open all rate masks so the fixed rate is always selectable */
+       lq_sta->active_legacy_rate = 0x0FFF;    /* 1 - 54 MBits, includes CCK */
+       lq_sta->active_siso_rate   = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+       lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+       lq_sta->active_mimo3_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+
+       IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+                      lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+       if (lq_sta->dbg_fixed_rate) {
+               /* NOTE(review): mvm argument passed as NULL here — verify
+                * rs_fill_link_cmd tolerates a NULL mvm on this path. */
+               rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+               iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
+       }
+}
+#endif
+
+/*
+ *     Get the total traffic-load value accumulated for @tid, after
+ *     pruning history cells older than TID_MAX_TIME_DIFF.  Returns 0
+ *     for an invalid TID or an uninitialized history.
+ */
+static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 index;
+       struct iwl_traffic_load *tl = NULL;
+
+       if (tid >= IWL_MAX_TID_COUNT)
+               return 0;
+
+       tl = &(lq_data->load[tid]);
+
+       /* quantize the timestamp to TID_ROUND_VALUE granularity */
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       if (!(tl->queue_count))
+               return 0;
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       index = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data that is older than */
+       /* TID_MAX_TIME_DIFF */
+       if (index >= TID_QUEUE_MAX_SIZE)
+               rs_tl_rm_old_stats(tl, curr_time);
+
+       return tl->total;
+}
+
+/*
+ * Try to start a Tx BA (aggregation) session for @tid if either
+ * auto-aggregation is forced by module parameter or the measured
+ * traffic load exceeds IWL_AGG_LOAD_THRESHOLD.  Returns 0 on success
+ * or a negative errno (-EAGAIN when aggregation was not attempted or
+ * mac80211 refused the session).
+ */
+static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
+                                     struct iwl_lq_sta *lq_data, u8 tid,
+                                     struct ieee80211_sta *sta)
+{
+       int ret = -EAGAIN;
+       u32 load;
+
+       load = rs_tl_get_load(lq_data, tid);
+
+       if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
+               IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
+                            sta->addr, tid);
+               ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+               if (ret == -EAGAIN) {
+                       /*
+                        * driver and mac80211 is out of sync
+                        * this might be cause by reloading firmware
+                        * stop the tx ba session here
+                        */
+                       IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
+                               tid);
+                       ieee80211_stop_tx_ba_session(sta, tid);
+               }
+       } else {
+               IWL_DEBUG_HT(mvm,
+                            "Aggregation not enabled for tid %d because load = %u\n",
+                            tid, load);
+       }
+       return ret;
+}
+
+/* Validate @tid and, if in range, try to enable Tx aggregation for it. */
+static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
+                             struct iwl_lq_sta *lq_data,
+                             struct ieee80211_sta *sta)
+{
+       if (tid < IWL_MAX_TID_COUNT)
+               rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
+       else
+               IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
+                       tid, IWL_MAX_TID_COUNT);
+}
+
+/* Count how many antenna bits (A/B/C) are set in @rate_n_flags. */
+static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+{
+       return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+              !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+              !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Static function to get the expected throughput from an iwl_scale_tbl_info
+ * that wraps a NULL pointer check.  Returns 0 when no throughput table
+ * has been assigned to @tbl yet.
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+       if (tbl->expected_tpt)
+               return tbl->expected_tpt[rs_index];
+       return 0;
+}
+
+/**
+ * rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate.  window->data contains the bitmask of successful
+ * packets.  @attempts frames are folded in, the first @successes of
+ * which are marked successful; the window's success ratio and average
+ * throughput are then recomputed.  Returns 0, or -EINVAL for an
+ * out-of-range @scale_index.
+ */
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes)
+{
+       struct iwl_rate_scale_data *window = NULL;
+       /* mask selecting the oldest (highest) bit of the history bitmap */
+       static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+       s32 fail_count, tpt;
+
+       if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+               return -EINVAL;
+
+       /* Select window for current tx bit rate */
+       window = &(tbl->win[scale_index]);
+
+       /* Get expected throughput */
+       tpt = get_expected_tpt(tbl, scale_index);
+
+       /*
+        * Keep track of only the latest 62 tx frame attempts in this rate's
+        * history window; anything older isn't really relevant any more.
+        * If we have filled up the sliding window, drop the oldest attempt;
+        * if the oldest attempt (highest bit in bitmap) shows "success",
+        * subtract "1" from the success counter (this is the main reason
+        * we keep these bitmaps!).
+        */
+       while (attempts > 0) {
+               if (window->counter >= IWL_RATE_MAX_WINDOW) {
+                       /* remove earliest */
+                       window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+                       if (window->data & mask) {
+                               window->data &= ~mask;
+                               window->success_counter--;
+                       }
+               }
+
+               /* Increment frames-attempted counter */
+               window->counter++;
+
+               /* Shift bitmap by one frame to throw away oldest history */
+               window->data <<= 1;
+
+               /* Mark the most recent #successes attempts as successful */
+               if (successes > 0) {
+                       window->success_counter++;
+                       window->data |= 0x1;
+                       successes--;
+               }
+
+               attempts--;
+       }
+
+       /* Calculate current success ratio, avoid divide-by-0! */
+       if (window->counter > 0)
+               window->success_ratio = 128 * (100 * window->success_counter)
+                                       / window->counter;
+       else
+               window->success_ratio = IWL_INVALID_VALUE;
+
+       fail_count = window->counter - window->success_counter;
+
+       /* Calculate average throughput, if we have enough history. */
+       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+               window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+       else
+               window->average_tpt = IWL_INVALID_VALUE;
+
+       /* Tag this window as having been updated */
+       window->stamp = jiffies;
+
+       return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ * Combines the PLCP rate for @index with antenna, HT40, SGI and
+ * green-field flags taken from @tbl.
+ */
+/* FIXME:RS:remove this function and put the flags statically in the table */
+static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
+                                struct iwl_scale_tbl_info *tbl,
+                                int index, u8 use_green)
+{
+       u32 rate_n_flags = 0;
+
+       if (is_legacy(tbl->lq_type)) {
+               rate_n_flags = iwl_rates[index].plcp;
+               if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+                       rate_n_flags |= RATE_MCS_CCK_MSK;
+       } else if (is_Ht(tbl->lq_type)) {
+               /* clamp out-of-range HT indices rather than overrunning */
+               if (index > IWL_LAST_OFDM_RATE) {
+                       IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
+                       index = IWL_LAST_OFDM_RATE;
+               }
+               rate_n_flags = RATE_MCS_HT_MSK;
+
+               if (is_siso(tbl->lq_type))
+                       rate_n_flags |= iwl_rates[index].plcp_siso;
+               else if (is_mimo2(tbl->lq_type))
+                       rate_n_flags |= iwl_rates[index].plcp_mimo2;
+               else
+                       rate_n_flags |= iwl_rates[index].plcp_mimo3;
+       } else {
+               IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+       }
+
+       rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+                                                    RATE_MCS_ANT_ABC_MSK);
+
+       if (is_Ht(tbl->lq_type)) {
+               if (tbl->is_ht40)
+                       rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
+               if (tbl->is_SGI)
+                       rate_n_flags |= RATE_MCS_SGI_MSK;
+
+               if (use_green) {
+                       rate_n_flags |= RATE_HT_MCS_GF_MSK;
+                       /* GF+SGI SISO is an illegal combination: drop SGI */
+                       if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+                               rate_n_flags &= ~RATE_MCS_SGI_MSK;
+                               IWL_ERR(mvm, "GF was set with SGI:SISO\n");
+                       }
+               }
+       }
+       return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ * On success *@rate_idx holds the rate-table index and @tbl describes
+ * the modulation (lq_type, antennas, SGI, HT40); returns -EINVAL and
+ * sets *@rate_idx to -1 for an unrecognized rate.
+ */
+static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+                                   enum ieee80211_band band,
+                                   struct iwl_scale_tbl_info *tbl,
+                                   int *rate_idx)
+{
+       u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+       u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+       u8 mcs;
+
+       memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+       *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+
+       if (*rate_idx  == IWL_RATE_INVALID) {
+               *rate_idx = -1;
+               return -EINVAL;
+       }
+       tbl->is_SGI = 0;        /* default legacy setup */
+       tbl->is_ht40 = 0;
+       tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+       tbl->lq_type = LQ_NONE;
+       tbl->max_search = IWL_MAX_SEARCH;
+
+       /* legacy rate format */
+       if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+               if (num_of_ant == 1) {
+                       if (band == IEEE80211_BAND_5GHZ)
+                               tbl->lq_type = LQ_A;
+                       else
+                               tbl->lq_type = LQ_G;
+               }
+       /* HT rate format */
+       } else {
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       tbl->is_SGI = 1;
+
+               if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
+                       tbl->is_ht40 = 1;
+
+               mcs = rs_extract_rate(rate_n_flags);
+
+               /* lq_type stays LQ_NONE when the antenna count doesn't
+                * match the stream count implied by the MCS range */
+               /* SISO */
+               if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+                       if (num_of_ant == 1)
+                               tbl->lq_type = LQ_SISO; /*else NONE*/
+               /* MIMO2 */
+               } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
+                       if (num_of_ant == 2)
+                               tbl->lq_type = LQ_MIMO2;
+               /* MIMO3 */
+               } else {
+                       if (num_of_ant == 3) {
+                               tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+                               tbl->lq_type = LQ_MIMO3;
+                       }
+               }
+       }
+       return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+/* On success, updates both tbl->ant_type and the antenna bits inside */
+/* *rate_n_flags to the newly chosen antenna configuration. */
+static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+                            struct iwl_scale_tbl_info *tbl)
+{
+       u8 new_ant_type;
+
+       if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+               return 0;
+
+       if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+               return 0;
+
+       new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+       /* walk the toggle chain until a valid antenna (or full cycle) */
+       while ((new_ant_type != tbl->ant_type) &&
+              !rs_is_valid_ant(valid_ant, new_ant_type))
+               new_ant_type = ant_toggle_lookup[new_ant_type];
+
+       if (new_ant_type == tbl->ant_type)
+               return 0;
+
+       tbl->ant_type = new_ant_type;
+       *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+       *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+       return 1;
+}
+
+/*
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS (as reported via
+ * the HT operation mode in the vif's bss_conf).
+ */
+static bool rs_use_green(struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+
+       bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode &
+                               IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+       return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green;
+}
+
+/**
+ * rs_get_supported_rates - get the available rates
+ *
+ * if management frame or broadcast frame only return
+ * basic available rates.
+ *
+ * Returns the station's active rate bitmask for the requested
+ * modulation class (legacy / SISO / MIMO2 / MIMO3).
+ * NOTE(review): @hdr is currently unused in this body — the
+ * management/broadcast special case described above is not implemented
+ * here; confirm whether that is intentional.
+ */
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+                                 struct ieee80211_hdr *hdr,
+                                 enum iwl_table_type rate_type)
+{
+       if (is_legacy(rate_type)) {
+               return lq_sta->active_legacy_rate;
+       } else {
+               if (is_siso(rate_type))
+                       return lq_sta->active_siso_rate;
+               else if (is_mimo2(rate_type))
+                       return lq_sta->active_mimo2_rate;
+               else
+                       return lq_sta->active_mimo3_rate;
+       }
+}
+
+/*
+ * Find the nearest lower and higher usable rates around @index within
+ * @rate_mask.  Result is packed as (high << 8) | low, with
+ * IWL_RATE_INVALID in either byte when no such neighbor exists.
+ */
+static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
+                               int rate_type)
+{
+       u8 high = IWL_RATE_INVALID;
+       u8 low = IWL_RATE_INVALID;
+
+       /* 802.11A or ht walks to the next literal adjacent rate in
+        * the rate table */
+       if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+               int i;
+               u32 mask;
+
+               /* Find the previous rate that is in the rate mask */
+               i = index - 1;
+               /*
+                * NOTE(review): when @index is 0, i starts at -1 and the
+                * initializer evaluates (1 << -1), which is undefined
+                * behavior in C — verify callers never pass index 0 here,
+                * or guard the shift.
+                */
+               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+                       if (rate_mask & mask) {
+                               low = i;
+                               break;
+                       }
+               }
+
+               /* Find the next rate that is in the rate mask */
+               i = index + 1;
+               for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+                       if (rate_mask & mask) {
+                               high = i;
+                               break;
+                       }
+               }
+
+               return (high << 8) | low;
+       }
+
+       /* 2.4 GHz legacy: follow the prev_rs/next_rs links instead,
+        * skipping rates masked out of @rate_mask */
+       low = index;
+       while (low != IWL_RATE_INVALID) {
+               low = iwl_rates[low].prev_rs;
+               if (low == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << low))
+                       break;
+               IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
+       }
+
+       high = index;
+       while (high != IWL_RATE_INVALID) {
+               high = iwl_rates[high].next_rs;
+               if (high == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << high))
+                       break;
+               IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
+       }
+
+       return (high << 8) | low;
+}
+
+/*
+ * Pick the next lower rate below @scale_index, switching @tbl from HT
+ * back to a legacy mode first when HT is no longer possible (or we are
+ * already at the lowest index).  Returns the uCode rate_n_flags value
+ * for the chosen rate.
+ */
+static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+                            struct iwl_scale_tbl_info *tbl,
+                            u8 scale_index, u8 ht_possible)
+{
+       s32 low;
+       u16 rate_mask;
+       u16 high_low;
+       u8 switch_to_legacy = 0;
+       u8 is_green = lq_sta->is_green;
+       struct iwl_mvm *mvm = lq_sta->drv;
+
+       /* check if we need to switch from HT to legacy rates.
+        * assumption is that mandatory rates (1Mbps or 6Mbps)
+        * are always supported (spec demand) */
+       if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+               switch_to_legacy = 1;
+               scale_index = rs_ht_to_legacy[scale_index];
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       tbl->lq_type = LQ_A;
+               else
+                       tbl->lq_type = LQ_G;
+
+               /* legacy uses a single antenna */
+               if (num_of_ant(tbl->ant_type) > 1)
+                       tbl->ant_type =
+                           first_antenna(mvm->nvm_data->valid_tx_ant);
+
+               tbl->is_ht40 = 0;
+               tbl->is_SGI = 0;
+               tbl->max_search = IWL_MAX_SEARCH;
+       }
+
+       rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+       /* Mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               /* supp_rates has no CCK bits in A mode */
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       rate_mask  = (u16)(rate_mask &
+                          (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+               else
+                       rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+       }
+
+       /* If we switched from HT to legacy, check current rate */
+       if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+               low = scale_index;
+               goto out;
+       }
+
+       high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+
+       /* no lower neighbor available: stay at the current index */
+       if (low == IWL_RATE_INVALID)
+               low = scale_index;
+
+out:
+       return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types: equal when
+ * modulation class, antenna configuration and SGI setting all match.
+ */
+static bool table_type_matches(struct iwl_scale_tbl_info *a,
+                              struct iwl_scale_tbl_info *b)
+{
+       return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+               (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status.
+ *
+ * Rate-control callback: validates that the reported frame was sent at
+ * the rate of the latest Link Quality command, folds the per-frame (or
+ * per-AMPDU) success/failure data into the matching active/search
+ * table's sliding windows, and finally re-runs the rate-scale
+ * algorithm for the station.
+ */
+static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta,
+                        struct sk_buff *skb)
+{
+       int legacy_success;
+       int retries;
+       int rs_index, mac_index, i;
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       struct iwl_lq_cmd *table;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       enum mac80211_rate_control_flags mac_flags;
+       u32 tx_rate;
+       struct iwl_scale_tbl_info tbl_type;
+       struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+
+       IWL_DEBUG_RATE_LIMIT(mvm,
+                            "get frame ack response, update rate scale window\n");
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (!lq_sta) {
+               IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+               return;
+       } else if (!lq_sta->drv) {
+               IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+               return;
+       }
+
+       /* only ACKed data frames carry usable rate-scale feedback */
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
+       /* This packet was aggregated but doesn't carry status info */
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+           !(info->flags & IEEE80211_TX_STAT_AMPDU))
+               return;
+
+       /*
+        * Ignore this Tx frame response if its initial rate doesn't match
+        * that of latest Link Quality command.  There may be stragglers
+        * from a previous Link Quality command, but we're no longer interested
+        * in those; they're either from the "active" mode while we're trying
+        * to check "search" mode, or a prior "search" mode after we've moved
+        * to a new "search" mode (which might become the new "active" mode).
+        */
+       table = &lq_sta->lq;
+       tx_rate = le32_to_cpu(table->rs_table[0]);
+       rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rs_index -= IWL_FIRST_OFDM_RATE;
+       mac_flags = info->status.rates[0].flags;
+       mac_index = info->status.rates[0].idx;
+       /* For HT packets, map MCS to PLCP */
+       if (mac_flags & IEEE80211_TX_RC_MCS) {
+               /* Remove # of streams */
+               mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
+               /* skip the 9 Mbps slot, which has no HT equivalent */
+               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+                       mac_index++;
+               /*
+                * mac80211 HT index is always zero-indexed; we need to move
+                * HT OFDM rates after CCK rates in 2.4 GHz band
+                */
+               if (info->band == IEEE80211_BAND_2GHZ)
+                       mac_index += IWL_FIRST_OFDM_RATE;
+       }
+       /* Here we actually compare this rate to the latest LQ command */
+       if ((mac_index < 0) ||
+           (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+           (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+           (tbl_type.ant_type != info->status.antenna) ||
+           (!!(tx_rate & RATE_MCS_HT_MSK) !=
+                               !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+           (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+                               !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+           (rs_index != mac_index)) {
+               IWL_DEBUG_RATE(mvm,
+                              "initial rate %d does not match %d (0x%x)\n",
+                              mac_index, rs_index, tx_rate);
+               /*
+                * Since rates mis-match, the last LQ command may have failed.
+                * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
+                * ... driver.
+                */
+               lq_sta->missed_rate_counter++;
+               if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+                       lq_sta->missed_rate_counter = 0;
+                       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+               }
+               /* Regardless, ignore this status info for outdated rate */
+               return;
+       } else
+               /* Rate did match, so reset the missed_rate_counter */
+               lq_sta->missed_rate_counter = 0;
+
+       /* Figure out if rate scale algorithm is in active or search table */
+       if (table_type_matches(&tbl_type,
+                              &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+               curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+       } else if (table_type_matches(
+                       &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+               curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       } else {
+               IWL_DEBUG_RATE(mvm,
+                              "Neither active nor search matches tx rate\n");
+               tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
+                              tmp_tbl->lq_type, tmp_tbl->ant_type,
+                              tmp_tbl->is_SGI);
+               tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
+                              tmp_tbl->lq_type, tmp_tbl->ant_type,
+                              tmp_tbl->is_SGI);
+               IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
+                              tbl_type.lq_type, tbl_type.ant_type,
+                              tbl_type.is_SGI);
+               /*
+                * no matching table found, let's by-pass the data collection
+                * and continue to perform rate scale to find the rate table
+                */
+               rs_stay_in_table(lq_sta, true);
+               goto done;
+       }
+
+       /*
+        * Updating the frame history depends on whether packets were
+        * aggregated.
+        *
+        * For aggregation, all packets were transmitted at the same rate, the
+        * first index into rate scale table.
+        */
+       if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+               tx_rate = le32_to_cpu(table->rs_table[0]);
+               rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
+                                        &rs_index);
+               rs_collect_tx_data(curr_tbl, rs_index,
+                                  info->status.ampdu_len,
+                                  info->status.ampdu_ack_len);
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += info->status.ampdu_ack_len;
+                       lq_sta->total_failed += (info->status.ampdu_len -
+                                       info->status.ampdu_ack_len);
+               }
+       } else {
+       /*
+        * For legacy, update frame history with for each Tx retry.
+        */
+               retries = info->status.rates[0].count - 1;
+               /* HW doesn't send more than 15 retries */
+               retries = min(retries, 15);
+
+               /* The last transmission may have been successful */
+               legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+               /* Collect data for each rate used during failed TX attempts */
+               for (i = 0; i <= retries; ++i) {
+                       tx_rate = le32_to_cpu(table->rs_table[i]);
+                       rs_get_tbl_info_from_mcs(tx_rate, info->band,
+                                                &tbl_type, &rs_index);
+                       /*
+                        * Only collect stats if retried rate is in the same RS
+                        * table as active/search.
+                        */
+                       if (table_type_matches(&tbl_type, curr_tbl))
+                               tmp_tbl = curr_tbl;
+                       else if (table_type_matches(&tbl_type, other_tbl))
+                               tmp_tbl = other_tbl;
+                       else
+                               continue;
+                       /* only the final attempt can be a success */
+                       rs_collect_tx_data(tmp_tbl, rs_index, 1,
+                                          i < retries ? 0 : legacy_success);
+               }
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += legacy_success;
+                       lq_sta->total_failed += retries + (1 - legacy_success);
+               }
+       }
+       /* The last TX rate is cached in lq_sta; it's set in if/else above */
+       lq_sta->last_rate_n_flags = tx_rate;
+done:
+       /* See if there's a better rate or modulation mode to try. */
+       if (sta && sta->supp_rates[sband->band])
+               rs_rate_scale_perform(mvm, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
+                                struct iwl_lq_sta *lq_sta)
+{
+       IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
+       lq_sta->stay_in_tbl = 1;        /* only place this gets set */
+       if (is_legacy) {
+               lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+               lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+       } else {
+               lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+               lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+       }
+       /* restart the cross-rate success/failure bookkeeping */
+       lq_sta->table_count = 0;
+       lq_sta->total_failed = 0;
+       lq_sta->total_success = 0;
+       lq_sta->flush_timer = jiffies;
+       lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation:
+ * selects the expected-throughput array matching @tbl's stream count,
+ * channel width, SGI and the station's aggregation state, and stores
+ * it in tbl->expected_tpt.
+ */
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+                                     struct iwl_scale_tbl_info *tbl)
+{
+       /* Used to choose among HT tables */
+       s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+       /* Check for invalid LQ type */
+       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Legacy rates have only one table */
+       if (is_legacy(tbl->lq_type)) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Choose among many HT tables depending on number of streams
+        * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+        * status */
+       if (is_siso(tbl->lq_type) && !tbl->is_ht40)
+               ht_tbl_pointer = expected_tpt_siso20MHz;
+       else if (is_siso(tbl->lq_type))
+               ht_tbl_pointer = expected_tpt_siso40MHz;
+       else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
+               ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+       else if (is_mimo2(tbl->lq_type))
+               ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+       else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
+               ht_tbl_pointer = expected_tpt_mimo3_20MHz;
+       else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
+               ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+
+       /* each table holds rows: [0]=Normal, [1]=SGI, [2]=AGG, [3]=AGG+SGI */
+       if (!tbl->is_SGI && !lq_sta->is_agg)            /* Normal */
+               tbl->expected_tpt = ht_tbl_pointer[0];
+       else if (tbl->is_SGI && !lq_sta->is_agg)        /* SGI */
+               tbl->expected_tpt = ht_tbl_pointer[1];
+       else if (!tbl->is_SGI && lq_sta->is_agg)        /* AGG */
+               tbl->expected_tpt = ht_tbl_pointer[2];
+       else                                            /* AGG+SGI */
+               tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ *
+ * @mvm:       driver context, used for adjacent-rate lookups
+ * @lq_sta:    per-station state; supplies the "active" table's success
+ *             ratio/expected throughput and the last measured throughput
+ * @tbl:       candidate "search" table (expected_tpt must already be set)
+ * @rate_mask: bitmap of rates usable in the search mode
+ * @index:     rate index to start the search from
+ *
+ * Returns the chosen rate index, or IWL_RATE_INVALID if no neighbour
+ * exists in the needed direction on the very first step.
+ */
+static s32 rs_get_best_rate(struct iwl_mvm *mvm,
+                           struct iwl_lq_sta *lq_sta,
+                           struct iwl_scale_tbl_info *tbl,     /* "search" */
+                           u16 rate_mask, s8 index)
+{
+       /* "active" values */
+       struct iwl_scale_tbl_info *active_tbl =
+           &(lq_sta->lq_info[lq_sta->active_tbl]);
+       s32 active_sr = active_tbl->win[index].success_ratio;
+       s32 active_tpt = active_tbl->expected_tpt[index];
+
+       /* expected "search" throughput */
+       s32 *tpt_tbl = tbl->expected_tpt;
+
+       s32 new_rate, high, low, start_hi;
+       u16 high_low;
+       s8 rate = index;
+
+       new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
+       /* Walk the rate ladder, one step per iteration, until the lowest
+        * rate whose expected throughput still beats "active" is found,
+        * or until direction reverses (handled via start_hi/new_rate). */
+       while (1) {
+               /* Adjacent usable rates, packed: next-lower index in the
+                * low byte, next-higher in the second byte (unpacked just
+                * below); IWL_RATE_INVALID marks a missing neighbour. */
+               high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
+                                               tbl->lq_type);
+
+               low = high_low & 0xff;
+               high = (high_low >> 8) & 0xff;
+
+               /*
+                * Lower the "search" bit rate, to give new "search" mode
+                * approximately the same throughput as "active" if:
+                *
+                * 1) "Active" mode has been working modestly well (but not
+                *    great), and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above the actual
+                *    measured "active" throughput (but less than expected
+                *    "active" throughput under perfect conditions).
+                * OR
+                * 2) "Active" mode has been working perfectly or very well
+                *    and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above expected
+                *    "active" throughput (under perfect conditions).
+                */
+               if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+                    ((active_sr > IWL_RATE_DECREASE_TH) &&
+                     (active_sr <= IWL_RATE_HIGH_TH) &&
+                     (tpt_tbl[rate] <= active_tpt))) ||
+                   ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+                    (tpt_tbl[rate] > active_tpt))) {
+                       /* (2nd or later pass)
+                        * If we've already tried to raise the rate, and are
+                        * now trying to lower it, use the higher rate. */
+                       if (start_hi != IWL_RATE_INVALID) {
+                               new_rate = start_hi;
+                               break;
+                       }
+
+                       new_rate = rate;
+
+                       /* Loop again with lower rate */
+                       if (low != IWL_RATE_INVALID)
+                               rate = low;
+
+                       /* Lower rate not available, use the original */
+                       else
+                               break;
+
+               /* Else try to raise the "search" rate to match "active" */
+               } else {
+                       /* (2nd or later pass)
+                        * If we've already tried to lower the rate, and are
+                        * now trying to raise it, use the lower rate. */
+                       if (new_rate != IWL_RATE_INVALID)
+                               break;
+
+                       /* Loop again with higher rate */
+                       else if (high != IWL_RATE_INVALID) {
+                               start_hi = high;
+                               rate = high;
+
+                       /* Higher rate not available, use the original */
+                       } else {
+                               new_rate = rate;
+                               break;
+                       }
+               }
+       }
+
+       return new_rate;
+}
+
+/* True when the peer station can receive at 40 MHz bandwidth or wider */
+static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+{
+       if (sta->bandwidth < IEEE80211_STA_RX_BW_40)
+               return false;
+
+       return true;
+}
+
+/*
+ * Set up search table for MIMO2
+ *
+ * Returns 0 and fills @tbl (type, width, expected throughput, starting
+ * rate) on success, -1 when MIMO2 is not possible for this station.
+ */
+static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       s8 is_green = lq_sta->is_green;
+       u16 rate_mask;
+       s32 rate;
+
+       /* MIMO2 requires an HT peer that is not in static SMPS */
+       if (!sta->ht_cap.ht_supported ||
+           sta->smps_mode == IEEE80211_SMPS_STATIC)
+               return -1;
+
+       /* Need both Tx chains/antennas to support MIMO */
+       if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 2)
+               return -1;
+
+       IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
+
+       tbl->lq_type = LQ_MIMO2;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_SEARCH;
+       tbl->is_ht40 = iwl_is_ht40_tx_allowed(sta) ? 1 : 0;
+       rate_mask = lq_sta->active_mimo2_rate;
+
+       rs_set_expected_tpt_table(lq_sta, tbl);
+
+       rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
+                      rate, rate_mask);
+       /* Bail out if no usable starting rate exists in the MIMO2 mask */
+       if (rate == IWL_RATE_INVALID || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+                              rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+
+       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+                      tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Set up search table for MIMO3
+ *
+ * Returns 0 and fills @tbl (type, width, expected throughput, starting
+ * rate) on success, -1 when MIMO3 is not possible for this station.
+ */
+static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       s8 is_green = lq_sta->is_green;
+       u16 rate_mask;
+       s32 rate;
+
+       /* MIMO3 requires an HT peer that is not in static SMPS */
+       if (!sta->ht_cap.ht_supported ||
+           sta->smps_mode == IEEE80211_SMPS_STATIC)
+               return -1;
+
+       /* Need three Tx chains/antennas to transmit three streams */
+       if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 3)
+               return -1;
+
+       IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
+
+       tbl->lq_type = LQ_MIMO3;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+       tbl->is_ht40 = iwl_is_ht40_tx_allowed(sta) ? 1 : 0;
+       rate_mask = lq_sta->active_mimo3_rate;
+
+       rs_set_expected_tpt_table(lq_sta, tbl);
+
+       rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
+                      rate, rate_mask);
+       /* Bail out if no usable starting rate exists in the MIMO3 mask */
+       if (rate == IWL_RATE_INVALID || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+                              rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+
+       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+                      tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Set up search table for SISO
+ *
+ * Returns 0 and fills @tbl (type, width, expected throughput, starting
+ * rate) on success, -1 when SISO is not possible for this station.
+ */
+static int rs_switch_to_siso(struct iwl_mvm *mvm,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       u8 is_green = lq_sta->is_green;
+       u16 rate_mask;
+       s32 rate;
+
+       /* SISO is only meaningful for an HT peer */
+       if (!sta->ht_cap.ht_supported)
+               return -1;
+
+       IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
+
+       tbl->lq_type = LQ_SISO;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_SEARCH;
+       tbl->is_ht40 = iwl_is_ht40_tx_allowed(sta) ? 1 : 0;
+       rate_mask = lq_sta->active_siso_rate;
+
+       if (is_green)
+               tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
+
+       rs_set_expected_tpt_table(lq_sta, tbl);
+       rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+       /* Bail out if no usable starting rate exists in the SISO mask */
+       if (rate == IWL_RATE_INVALID || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(mvm,
+                              "can not switch with index %d rate mask %x\n",
+                              rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+                      tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ *
+ * Cycles through the legacy action list (toggle antenna, try SISO, try
+ * each MIMO2 antenna pair, try MIMO3) starting from tbl->action.  The
+ * first action that can be set up is written into the inactive
+ * lq_info[] slot and lq_sta->search_better_tbl is set so the caller
+ * will evaluate it.  If a full lap completes with no candidate, the
+ * search slot is invalidated (LQ_NONE).  Always returns 0.
+ */
+static int rs_move_legacy_other(struct iwl_mvm *mvm,
+                               struct iwl_lq_sta *lq_sta,
+                               struct ieee80211_sta *sta,
+                               int index)
+{
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       /* Copy size: the table minus the per-rate history windows
+        * (presumably win[] is the trailing member of
+        * iwl_scale_tbl_info -- verify against the struct layout) */
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+       u8 tx_chains_num = num_of_ant(valid_tx_ant);
+       int ret;
+       u8 update_search_tbl_counter = 0;
+
+       /* Remember where we started so we stop after one full lap */
+       start_action = tbl->action;
+       while (1) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_LEGACY_SWITCH_ANTENNA1:
+               case IWL_LEGACY_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
+
+                       /* Skip if there is no other chain to toggle to */
+                       if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+                            tx_chains_num <= 1) ||
+                           (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+                            tx_chains_num <= 2))
+                               break;
+
+                       /* Don't change antenna if success has been great */
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       /* Set up search table to try other antenna */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (rs_toggle_antenna(valid_tx_ant,
+                                             &search_tbl->current_rate,
+                                             search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               rs_set_expected_tpt_table(lq_sta, search_tbl);
+                               goto out;
+                       }
+                       break;
+               case IWL_LEGACY_SWITCH_SISO:
+                       IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
+
+                       /* Set up search table to try SISO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       ret = rs_switch_to_siso(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+
+                       break;
+               case IWL_LEGACY_SWITCH_MIMO2_AB:
+               case IWL_LEGACY_SWITCH_MIMO2_AC:
+               case IWL_LEGACY_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
+
+                       /* Set up search table to try MIMO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       /* Pick the antenna pair this action stands for */
+                       if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+                       break;
+
+               case IWL_LEGACY_SWITCH_MIMO3_ABC:
+                       IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
+
+                       /* Set up search table to try MIMO3 */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       search_tbl->ant_type = ANT_ABC;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+                       break;
+               }
+               /* Advance to the next action, wrapping at the end of the
+                * legacy action list */
+               tbl->action++;
+               if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+                       tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       /* Full lap with no viable candidate: invalidate the search slot */
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+out:
+       /* Candidate set up: flag it and advance the action cursor */
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+               tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ *
+ * Cycles through the SISO action list (toggle antenna, try each MIMO2
+ * antenna pair, toggle guard interval, try MIMO3) starting from
+ * tbl->action.  The first viable candidate is written into the inactive
+ * lq_info[] slot and lq_sta->search_better_tbl is set so the caller
+ * will evaluate it.  If a full lap completes with no candidate, the
+ * search slot is invalidated (LQ_NONE).  Always returns 0.
+ */
+static int rs_move_siso_to_other(struct iwl_mvm *mvm,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_sta *sta, int index)
+{
+       u8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       /* Copy size: the table minus the per-rate history windows */
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+       u8 tx_chains_num = num_of_ant(valid_tx_ant);
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       /* Remember where we started so we stop after one full lap */
+       start_action = tbl->action;
+       while (1) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_SISO_SWITCH_ANTENNA1:
+               case IWL_SISO_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
+                       /* Skip if there is no other chain to toggle to */
+                       if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+                            tx_chains_num <= 1) ||
+                           (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+                            tx_chains_num <= 2))
+                               break;
+
+                       /* Don't change antenna if success has been great */
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (rs_toggle_antenna(valid_tx_ant,
+                                             &search_tbl->current_rate,
+                                             search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IWL_SISO_SWITCH_MIMO2_AB:
+               case IWL_SISO_SWITCH_MIMO2_AC:
+               case IWL_SISO_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       /* Pick the antenna pair this action stands for */
+                       if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+                       break;
+               case IWL_SISO_SWITCH_GI:
+                       /* Only toggle GI if the peer supports SGI at the
+                        * current channel width */
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (is_green) {
+                               if (!tbl->is_SGI)
+                                       break;
+                               else
+                                       IWL_ERR(mvm,
+                                               "SGI was set in GF+SISO\n");
+                       }
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       /* Don't drop SGI if we're already doing better
+                        * than the non-SGI table could */
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               rate_n_flags_from_tbl(mvm, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+               case IWL_SISO_SWITCH_MIMO3_ABC:
+                       IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       search_tbl->ant_type = ANT_ABC;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+                       break;
+               }
+               /* Advance to the next action, wrapping at the end of the
+                * SISO action list (fix: this previously wrapped on
+                * IWL_LEGACY_SWITCH_MIMO3_ABC, the legacy list's limit) */
+               tbl->action++;
+               if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       /* Full lap with no viable candidate: invalidate the search slot */
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+ out:
+       /* Candidate set up: flag it and advance the action cursor */
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+               tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ *
+ * Cycles through the MIMO2 action list (toggle antenna, try each SISO
+ * antenna, toggle guard interval, try MIMO3) starting from tbl->action.
+ * The first viable candidate is written into the inactive lq_info[]
+ * slot and lq_sta->search_better_tbl is set so the caller will evaluate
+ * it.  If a full lap completes with no candidate, the search slot is
+ * invalidated (LQ_NONE).  Always returns 0.
+ */
+static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_sta *sta, int index)
+{
+       s8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       /* Copy size: the table minus the per-rate history windows */
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+       u8 tx_chains_num = num_of_ant(valid_tx_ant);
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       /* Remember where we started so we stop after one full lap */
+       start_action = tbl->action;
+       while (1) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_MIMO2_SWITCH_ANTENNA1:
+               case IWL_MIMO2_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
+
+                       /* Toggling a 2-antenna set needs a third chain */
+                       if (tx_chains_num <= 2)
+                               break;
+
+                       /* Don't change antennas if success has been great */
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (rs_toggle_antenna(valid_tx_ant,
+                                             &search_tbl->current_rate,
+                                             search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IWL_MIMO2_SWITCH_SISO_A:
+               case IWL_MIMO2_SWITCH_SISO_B:
+               case IWL_MIMO2_SWITCH_SISO_C:
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
+
+                       /* Set up new search table for SISO */
+                       memcpy(search_tbl, tbl, sz);
+
+                       /* Pick the single antenna this action stands for */
+                       if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+                               search_tbl->ant_type = ANT_A;
+                       else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+                               search_tbl->ant_type = ANT_B;
+                       else
+                               search_tbl->ant_type = ANT_C;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_siso(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+
+                       break;
+
+               case IWL_MIMO2_SWITCH_GI:
+                       /* Only toggle GI if the peer supports SGI at the
+                        * current channel width */
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
+
+                       /* Set up new search table for MIMO2 */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       /*
+                        * If active table already uses the fastest possible
+                        * modulation (dual stream with short guard interval),
+                        * and it's working well, there's no need to look
+                        * for a better type of modulation!
+                        */
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               rate_n_flags_from_tbl(mvm, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+
+               case IWL_MIMO2_SWITCH_MIMO3_ABC:
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       search_tbl->ant_type = ANT_ABC;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+
+                       break;
+               }
+               /* Advance to the next action, wrapping at the end of the
+                * MIMO2 action list */
+               tbl->action++;
+               if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+                       tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       /* Full lap with no viable candidate: invalidate the search slot */
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+ out:
+       /* Candidate set up: flag it and advance the action cursor */
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+               tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO3
+ *
+ * Cycles through the MIMO3 action list (toggle antenna, try each SISO
+ * antenna, try each MIMO2 antenna pair, toggle guard interval) starting
+ * from tbl->action.  The first viable candidate is written into the
+ * inactive lq_info[] slot and lq_sta->search_better_tbl is set so the
+ * caller will evaluate it.  If a full lap completes with no candidate,
+ * the search slot is invalidated (LQ_NONE).  Always returns 0.
+ */
+static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_sta *sta, int index)
+{
+       s8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       /* Copy size: the table minus the per-rate history windows */
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+       u8 tx_chains_num = num_of_ant(valid_tx_ant);
+       int ret;
+       u8 update_search_tbl_counter = 0;
+
+       /* Remember where we started so we stop after one full lap */
+       start_action = tbl->action;
+       while (1) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_MIMO3_SWITCH_ANTENNA1:
+               case IWL_MIMO3_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
+
+                       /* Toggling a 3-antenna set needs a fourth chain */
+                       if (tx_chains_num <= 3)
+                               break;
+
+                       /* Don't change antennas if success has been great */
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       /* NOTE(review): unlike the other rs_move_* helpers,
+                        * this antenna-toggle path never sets
+                        * update_search_tbl_counter -- confirm whether that
+                        * is intentional */
+                       memcpy(search_tbl, tbl, sz);
+                       if (rs_toggle_antenna(valid_tx_ant,
+                                             &search_tbl->current_rate,
+                                             search_tbl))
+                               goto out;
+                       break;
+               case IWL_MIMO3_SWITCH_SISO_A:
+               case IWL_MIMO3_SWITCH_SISO_B:
+               case IWL_MIMO3_SWITCH_SISO_C:
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
+
+                       /* Set up new search table for SISO */
+                       memcpy(search_tbl, tbl, sz);
+
+                       /* Pick the single antenna this action stands for */
+                       if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
+                               search_tbl->ant_type = ANT_A;
+                       else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
+                               search_tbl->ant_type = ANT_B;
+                       else
+                               search_tbl->ant_type = ANT_C;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_siso(mvm, lq_sta, sta,
+                                               search_tbl, index);
+                       if (!ret)
+                               goto out;
+
+                       break;
+
+               case IWL_MIMO3_SWITCH_MIMO2_AB:
+               case IWL_MIMO3_SWITCH_MIMO2_AC:
+               case IWL_MIMO3_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
+
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       /* Pick the antenna pair this action stands for */
+                       if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!rs_is_valid_ant(valid_tx_ant,
+                                            search_tbl->ant_type))
+                               break;
+
+                       ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+
+                       break;
+
+               case IWL_MIMO3_SWITCH_GI:
+                       /* Only toggle GI if the peer supports SGI at the
+                        * current channel width */
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
+
+                       /* Set up new search table for MIMO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       /*
+                        * If active table already uses the fastest possible
+                        * modulation (dual stream with short guard interval),
+                        * and it's working well, there's no need to look
+                        * for a better type of modulation!
+                        */
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               rate_n_flags_from_tbl(mvm, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+               }
+               /* Advance to the next action, wrapping at the end of the
+                * MIMO3 action list */
+               tbl->action++;
+               if (tbl->action > IWL_MIMO3_SWITCH_GI)
+                       tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       /* Full lap with no viable candidate: invalidate the search slot */
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+ out:
+       /* Candidate set up: flag it and advance the action cursor */
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_MIMO3_SWITCH_GI)
+               tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ *
+ * When the stay period expires (or @force_search is set) this clears
+ * lq_sta->stay_in_tbl - the only place it is reset - and wipes the
+ * per-rate history windows of the active table so the upcoming mode
+ * search starts from clean statistics.
+ */
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+       struct iwl_scale_tbl_info *tbl;
+       int i;
+       int active_tbl;
+       int flush_interval_passed = 0;
+       struct iwl_mvm *mvm;
+
+       mvm = lq_sta->drv;
+       active_tbl = lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       /* If we've been disallowing search, see if we should now allow it */
+       if (lq_sta->stay_in_tbl) {
+               /* Elapsed time using current modulation mode */
+               if (lq_sta->flush_timer)
+                       flush_interval_passed =
+                               time_after(jiffies,
+                                          (unsigned long)(lq_sta->flush_timer +
+                                               IWL_RATE_SCALE_FLUSH_INTVL));
+
+               /*
+                * Check if we should allow search for new modulation mode.
+                * If many frames have failed or succeeded, or we've used
+                * this same modulation for a long time, allow search, and
+                * reset history stats that keep track of whether we should
+                * allow a new search.  Also (below) reset all bitmaps and
+                * stats in active history.
+                */
+               if (force_search ||
+                   (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+                   (lq_sta->total_success > lq_sta->max_success_limit) ||
+                   ((!lq_sta->search_better_tbl) &&
+                    (lq_sta->flush_timer) && (flush_interval_passed))) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "LQ: stay is expired %d %d %d\n",
+                                    lq_sta->total_failed,
+                                    lq_sta->total_success,
+                                    flush_interval_passed);
+
+                       /* Allow search for new mode */
+                       lq_sta->stay_in_tbl = 0;        /* only place reset */
+                       lq_sta->total_failed = 0;
+                       lq_sta->total_success = 0;
+                       lq_sta->flush_timer = 0;
+               /*
+                * Else if we've used this modulation mode enough repetitions
+                * (regardless of elapsed time or success/failure), reset
+                * history bitmaps and rate-specific stats for all rates in
+                * active table.
+                */
+               } else {
+                       lq_sta->table_count++;
+                       if (lq_sta->table_count >=
+                           lq_sta->table_count_limit) {
+                               lq_sta->table_count = 0;
+
+                               IWL_DEBUG_RATE(mvm,
+                                              "LQ: stay in table clear win\n");
+                               for (i = 0; i < IWL_RATE_COUNT; i++)
+                                       rs_rate_scale_clear_window(
+                                               &(tbl->win[i]));
+                       }
+               }
+
+               /* If transitioning to allow "search", reset all history
+                * bitmaps and stats in active table (this will become the new
+                * "search" table). */
+               if (!lq_sta->stay_in_tbl) {
+                       for (i = 0; i < IWL_RATE_COUNT; i++)
+                               rs_rate_scale_clear_window(&(tbl->win[i]));
+               }
+       }
+}
+
+/*
+ * Program the uCode's rate table: translate (tbl, index) into a
+ * rate_n_flags value, build the link-quality command from it, and
+ * send that command to the firmware asynchronously.
+ */
+static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+                              struct iwl_lq_sta *lq_sta,
+                              struct iwl_scale_tbl_info *tbl,
+                              int index, u8 is_green)
+{
+       u32 new_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+
+       rs_fill_link_cmd(mvm, lq_sta, new_rate);
+       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ *
+ * Main entry of the rate-scale algorithm for one transmitted frame.
+ * It updates the history window for the current rate, possibly picks
+ * a higher/lower rate within the current modulation mode, and
+ * periodically starts a search for a whole new mode (legacy, SISO,
+ * MIMO2 or MIMO3) via the rs_move_*_to_other() helpers.  Management
+ * and NO_ACK frames are left alone.
+ */
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+                                 struct sk_buff *skb,
+                                 struct ieee80211_sta *sta,
+                                 struct iwl_lq_sta *lq_sta)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int low = IWL_RATE_INVALID;
+       int high = IWL_RATE_INVALID;
+       int index;
+       int i;
+       struct iwl_rate_scale_data *window = NULL;
+       int current_tpt = IWL_INVALID_VALUE;
+       int low_tpt = IWL_INVALID_VALUE;
+       int high_tpt = IWL_INVALID_VALUE;
+       u32 fail_count;
+       s8 scale_action = 0;
+       u16 rate_mask;
+       u8 update_lq = 0;
+       struct iwl_scale_tbl_info *tbl, *tbl1;
+       u16 rate_scale_index_msk = 0;
+       u8 is_green = 0;
+       u8 active_tbl = 0;
+       u8 done_search = 0;
+       u16 high_low;
+       s32 sr;
+       u8 tid = IWL_MAX_TID_COUNT;
+       struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+       struct iwl_mvm_tid_data *tid_data;
+
+       IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       /* TODO: this could probably be improved.. */
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
+       lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+       /* Track the TID of this frame and whether it is part of an
+        * active aggregation session. */
+       tid = rs_tl_add_packet(lq_sta, hdr);
+       if ((tid != IWL_MAX_TID_COUNT) &&
+           (lq_sta->tx_agg_tid_en & (1 << tid))) {
+               tid_data = &sta_priv->tid_data[tid];
+               if (tid_data->state == IWL_AGG_OFF)
+                       lq_sta->is_agg = 0;
+               else
+                       lq_sta->is_agg = 1;
+       } else {
+               lq_sta->is_agg = 0;
+       }
+
+       /*
+        * Select rate-scale / modulation-mode table to work with in
+        * the rest of this function:  "search" if searching for better
+        * modulation mode, or "active" if doing rate scaling within a mode.
+        */
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+       if (is_legacy(tbl->lq_type))
+               lq_sta->is_green = 0;
+       else
+               lq_sta->is_green = rs_use_green(sta);
+       is_green = lq_sta->is_green;
+
+       /* current tx rate */
+       index = lq_sta->last_txrate_idx;
+
+       IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
+                      tbl->lq_type);
+
+       /* rates available for this association, and for modulation mode */
+       rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+       IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
+
+       /* mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       /* supp_rates has no CCK bits in A mode */
+                       rate_scale_index_msk = (u16) (rate_mask &
+                               (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+               else
+                       rate_scale_index_msk = (u16) (rate_mask &
+                                                     lq_sta->supp_rates);
+
+       } else {
+               rate_scale_index_msk = rate_mask;
+       }
+
+       /* Never allow an empty mask; fall back to the unrestricted set */
+       if (!rate_scale_index_msk)
+               rate_scale_index_msk = rate_mask;
+
+       if (!((1 << index) & rate_scale_index_msk)) {
+               IWL_ERR(mvm, "Current Rate is not valid\n");
+               if (lq_sta->search_better_tbl) {
+                       /* revert to active table if search table is not valid*/
+                       tbl->lq_type = LQ_NONE;
+                       lq_sta->search_better_tbl = 0;
+                       tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+                       /* get "active" rate info */
+                       index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+                       rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+               }
+               return;
+       }
+
+       /* Get expected throughput table and history window for current rate */
+       if (!tbl->expected_tpt) {
+               IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
+               return;
+       }
+
+       /* force user max rate if set by user */
+       if ((lq_sta->max_rate_idx != -1) &&
+           (lq_sta->max_rate_idx < index)) {
+               index = lq_sta->max_rate_idx;
+               update_lq = 1;
+               window = &(tbl->win[index]);
+               goto lq_update;
+       }
+
+       window = &(tbl->win[index]);
+
+       /*
+        * If there is not enough history to calculate actual average
+        * throughput, keep analyzing results of more tx frames, without
+        * changing rate or mode (bypass most of the rest of this function).
+        * Set up new rate table in uCode only if old rate is not supported
+        * in current association (use new rate found above).
+        */
+       fail_count = window->counter - window->success_counter;
+       if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+           (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+               IWL_DEBUG_RATE(mvm,
+                              "LQ: still below TH. succ=%d total=%d for index %d\n",
+                              window->success_counter, window->counter, index);
+
+               /* Can't calculate this yet; not enough history */
+               window->average_tpt = IWL_INVALID_VALUE;
+
+               /* Should we stay with this modulation mode,
+                * or search for a new one? */
+               rs_stay_in_table(lq_sta, false);
+
+               goto out;
+       }
+       /* Else we have enough samples; calculate estimate of
+        * actual average throughput.  The divisor of 128 suggests
+        * success_ratio is a fixed-point fraction of 128; "+ 64"
+        * rounds the division to nearest. */
+       if (window->average_tpt != ((window->success_ratio *
+                       tbl->expected_tpt[index] + 64) / 128)) {
+               IWL_ERR(mvm,
+                       "expected_tpt should have been calculated by now\n");
+               window->average_tpt = ((window->success_ratio *
+                                       tbl->expected_tpt[index] + 64) / 128);
+       }
+
+       /* If we are searching for better modulation mode, check success. */
+       if (lq_sta->search_better_tbl) {
+               /* If good success, continue using the "search" mode;
+                * no need to send new link quality command, since we're
+                * continuing to use the setup that we've been trying. */
+               if (window->average_tpt > lq_sta->last_tpt) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+                                      window->success_ratio,
+                                      window->average_tpt,
+                                      lq_sta->last_tpt);
+
+                       if (!is_legacy(tbl->lq_type))
+                               lq_sta->enable_counter = 1;
+
+                       /* Swap tables; "search" becomes "active" */
+                       lq_sta->active_tbl = active_tbl;
+                       current_tpt = window->average_tpt;
+               /* Else poor success; go back to mode in "active" table */
+               } else {
+                       IWL_DEBUG_RATE(mvm,
+                                      "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+                                      window->success_ratio,
+                                      window->average_tpt,
+                                      lq_sta->last_tpt);
+
+                       /* Nullify "search" table */
+                       tbl->lq_type = LQ_NONE;
+
+                       /* Revert to "active" table */
+                       active_tbl = lq_sta->active_tbl;
+                       tbl = &(lq_sta->lq_info[active_tbl]);
+
+                       /* Revert to "active" rate and throughput info */
+                       index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+                       current_tpt = lq_sta->last_tpt;
+
+                       /* Need to set up a new rate table in uCode */
+                       update_lq = 1;
+               }
+
+               /* Either way, we've made a decision; modulation mode
+                * search is done, allow rate adjustment next time. */
+               lq_sta->search_better_tbl = 0;
+               done_search = 1;        /* Don't switch modes below! */
+               goto lq_update;
+       }
+
+       /* (Else) not in search of better modulation mode, try for better
+        * starting rate, while staying in this mode. */
+       high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+       high = (high_low >> 8) & 0xff;
+
+       /* If user set max rate, dont allow higher than user constrain */
+       if ((lq_sta->max_rate_idx != -1) &&
+           (lq_sta->max_rate_idx < high))
+               high = IWL_RATE_INVALID;
+
+       sr = window->success_ratio;
+
+       /* Collect measured throughputs for current and adjacent rates */
+       current_tpt = window->average_tpt;
+       if (low != IWL_RATE_INVALID)
+               low_tpt = tbl->win[low].average_tpt;
+       if (high != IWL_RATE_INVALID)
+               high_tpt = tbl->win[high].average_tpt;
+
+       scale_action = 0;
+
+       /* Too many failures, decrease rate */
+       if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+               IWL_DEBUG_RATE(mvm,
+                              "decrease rate because of low success_ratio\n");
+               scale_action = -1;
+       /* No throughput measured yet for adjacent rates; try increase. */
+       } else if ((low_tpt == IWL_INVALID_VALUE) &&
+                  (high_tpt == IWL_INVALID_VALUE)) {
+               if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+                       scale_action = 1;
+               else if (low != IWL_RATE_INVALID)
+                       scale_action = 0;
+       }
+
+       /* Both adjacent throughputs are measured, but neither one has better
+        * throughput; we're using the best rate, don't change it! */
+       else if ((low_tpt != IWL_INVALID_VALUE) &&
+                (high_tpt != IWL_INVALID_VALUE) &&
+                (low_tpt < current_tpt) &&
+                (high_tpt < current_tpt))
+               scale_action = 0;
+
+       /* At least one adjacent rate's throughput is measured,
+        * and may have better performance. */
+       else {
+               /* Higher adjacent rate's throughput is measured */
+               if (high_tpt != IWL_INVALID_VALUE) {
+                       /* Higher rate has better throughput */
+                       if (high_tpt > current_tpt &&
+                           sr >= IWL_RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       } else {
+                               scale_action = 0;
+                       }
+
+               /* Lower adjacent rate's throughput is measured */
+               } else if (low_tpt != IWL_INVALID_VALUE) {
+                       /* Lower rate has better throughput */
+                       if (low_tpt > current_tpt) {
+                               IWL_DEBUG_RATE(mvm,
+                                              "decrease rate because of low tpt\n");
+                               scale_action = -1;
+                       } else if (sr >= IWL_RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       }
+               }
+       }
+
+       /* Sanity check; asked for decrease, but success rate or throughput
+        * has been good at old rate.  Don't change it. */
+       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+           ((sr > IWL_RATE_HIGH_TH) ||
+            (current_tpt > (100 * tbl->expected_tpt[low]))))
+               scale_action = 0;
+
+       switch (scale_action) {
+       case -1:
+               /* Decrease starting rate, update uCode's rate table */
+               if (low != IWL_RATE_INVALID) {
+                       update_lq = 1;
+                       index = low;
+               }
+
+               break;
+       case 1:
+               /* Increase starting rate, update uCode's rate table */
+               if (high != IWL_RATE_INVALID) {
+                       update_lq = 1;
+                       index = high;
+               }
+
+               break;
+       case 0:
+               /* No change */
+               /* fall through */
+       default:
+               break;
+       }
+
+       IWL_DEBUG_RATE(mvm,
+                      "choose rate scale index %d action %d low %d high %d type %d\n",
+                      index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+       /* Replace uCode's rate table for the destination station. */
+       if (update_lq)
+               rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+
+       rs_stay_in_table(lq_sta, false);
+
+       /*
+        * Search for new modulation mode if we're:
+        * 1)  Not changing rates right now
+        * 2)  Not just finishing up a search
+        * 3)  Allowing a new search
+        */
+       if (!update_lq && !done_search &&
+           !lq_sta->stay_in_tbl && window->counter) {
+               /* Save current throughput to compare with "search" throughput*/
+               lq_sta->last_tpt = current_tpt;
+
+               /* Select a new "search" modulation mode to try.
+                * If one is found, set up the new "search" table. */
+               if (is_legacy(tbl->lq_type))
+                       rs_move_legacy_other(mvm, lq_sta, sta, index);
+               else if (is_siso(tbl->lq_type))
+                       rs_move_siso_to_other(mvm, lq_sta, sta, index);
+               else if (is_mimo2(tbl->lq_type))
+                       rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
+               else
+                       rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+
+               /* If new "search" mode was selected, set up in uCode table */
+               if (lq_sta->search_better_tbl) {
+                       /* Access the "search" table, clear its history. */
+                       tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+                       for (i = 0; i < IWL_RATE_COUNT; i++)
+                               rs_rate_scale_clear_window(&(tbl->win[i]));
+
+                       /* Use new "search" start rate */
+                       index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+
+                       IWL_DEBUG_RATE(mvm,
+                                      "Switch current  mcs: %X index: %d\n",
+                                      tbl->current_rate, index);
+                       rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+                       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+               } else {
+                       done_search = 1;
+               }
+       }
+
+       if (done_search && !lq_sta->stay_in_tbl) {
+               /* If the "active" (non-search) mode was legacy,
+                * and we've tried switching antennas,
+                * but we haven't been able to try HT modes (not available),
+                * stay with best antenna legacy modulation for a while
+                * before next round of mode comparisons. */
+               tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
+                   lq_sta->action_counter > tbl1->max_search) {
+                       IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
+                       rs_set_stay_in_table(mvm, 1, lq_sta);
+               }
+
+               /* If we're in an HT mode, and all 3 mode switch actions
+                * have been tried and compared, stay in this best modulation
+                * mode for a while before next round of mode comparisons. */
+               if (lq_sta->enable_counter &&
+                   (lq_sta->action_counter >= tbl1->max_search)) {
+                       if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+                           (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+                           (tid != IWL_MAX_TID_COUNT)) {
+                               tid_data = &sta_priv->tid_data[tid];
+                               if (tid_data->state == IWL_AGG_OFF) {
+                                       IWL_DEBUG_RATE(mvm,
+                                                      "try to aggregate tid %d\n",
+                                                      tid);
+                                       rs_tl_turn_on_agg(mvm, tid,
+                                                         lq_sta, sta);
+                               }
+                       }
+                       rs_set_stay_in_table(mvm, 0, lq_sta);
+               }
+       }
+
+out:
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+       lq_sta->last_txrate_idx = index;
+}
+
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void rs_initialize_lq(struct iwl_mvm *mvm,
+                            struct ieee80211_sta *sta,
+                            struct iwl_lq_sta *lq_sta,
+                            enum ieee80211_band band)
+{
+       struct iwl_scale_tbl_info *tbl;
+       int rate_idx;
+       int i;
+       u32 rate;
+       u8 use_green = rs_use_green(sta);
+       u8 active_tbl = 0;
+       u8 valid_tx_ant;
+
+       if (!sta || !lq_sta)
+               return;
+
+       i = lq_sta->last_txrate_idx;
+
+       valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+
+       /* Work on the "search" table if a search is in flight,
+        * otherwise on the "active" table. */
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       /* Clamp out-of-range starting index to the lowest rate */
+       if ((i < 0) || (i >= IWL_RATE_COUNT))
+               i = 0;
+
+       rate = iwl_rates[i].plcp;
+       tbl->ant_type = first_antenna(valid_tx_ant);
+       rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+       if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+               rate |= RATE_MCS_CCK_MSK;
+
+       rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
+       if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+               rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+       rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+       tbl->current_rate = rate;
+       rs_set_expected_tpt_table(lq_sta, tbl);
+       /* NOTE(review): the first argument is deliberately(?) NULL
+        * rather than mvm here - rs_fill_link_cmd must tolerate a
+        * NULL mvm on this init path; confirm before changing. */
+       rs_fill_link_cmd(NULL, lq_sta, rate);
+       /* TODO restore station should remember the lq cmd */
+       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+}
+
+/*
+ * mac80211 rate-control .get_rate handler: fill info->control.rates[0]
+ * for this skb from the last rate chosen by the scaling algorithm
+ * (lq_sta->last_txrate_idx), translating HT rates to an MCS index plus
+ * SGI/40MHz/greenfield flags, and legacy rates to a per-band index.
+ */
+static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
+                       struct ieee80211_tx_rate_control *txrc)
+{
+       struct sk_buff *skb = txrc->skb;
+       struct ieee80211_supported_band *sband = txrc->sband;
+       struct iwl_op_mode *op_mode __maybe_unused =
+                       (struct iwl_op_mode *)mvm_r;
+       struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_lq_sta *lq_sta = mvm_sta;
+       int rate_idx;
+
+       IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
+
+       /* Get max rate if user set max rate */
+       if (lq_sta) {
+               lq_sta->max_rate_idx = txrc->max_rate_idx;
+               if ((sband->band == IEEE80211_BAND_5GHZ) &&
+                   (lq_sta->max_rate_idx != -1))
+                       lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+               if ((lq_sta->max_rate_idx < 0) ||
+                   (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+                       lq_sta->max_rate_idx = -1;
+       }
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (lq_sta && !lq_sta->drv) {
+               IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+               mvm_sta = NULL;
+       }
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       if (rate_control_send_low(sta, mvm_sta, txrc))
+               return;
+
+       /* NOTE(review): lq_sta is dereferenced unconditionally below;
+        * this relies on rate_control_send_low() returning true (and
+        * us returning above) whenever mvm_sta is NULL - confirm
+        * against the mac80211 implementation. */
+       rate_idx  = lq_sta->last_txrate_idx;
+
+       if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+               rate_idx -= IWL_FIRST_OFDM_RATE;
+               /* 6M and 9M shared same MCS index */
+               rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+               if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+                   IWL_RATE_MIMO3_6M_PLCP)
+                       rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
+               else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+                        IWL_RATE_MIMO2_6M_PLCP)
+                       rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+               info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+                       info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
+                       info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+               if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
+                       info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
+       } else {
+               /* Check for invalid rates */
+               if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+                   ((sband->band == IEEE80211_BAND_5GHZ) &&
+                    (rate_idx < IWL_FIRST_OFDM_RATE)))
+                       rate_idx = rate_lowest_index(sband, sta);
+               /* On valid 5 GHz rate, adjust index */
+               else if (sband->band == IEEE80211_BAND_5GHZ)
+                       rate_idx -= IWL_FIRST_OFDM_RATE;
+               info->control.rates[0].flags = 0;
+       }
+       info->control.rates[0].idx = rate_idx;
+}
+
+/*
+ * mac80211 rate-control .alloc_sta handler.  Per-station rate-scaling
+ * state lives inside the driver's station private area (drv_priv), so
+ * no separate allocation is performed here - just hand back a pointer
+ * into that area.
+ */
+static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+                         gfp_t gfp)
+{
+       struct iwl_op_mode *op_mode __maybe_unused =
+                       (struct iwl_op_mode *)mvm_rate;
+       struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+       struct iwl_mvm_sta *mvmsta = (struct iwl_mvm_sta *)sta->drv_priv;
+
+       IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+       return &mvmsta->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                         enum ieee80211_band band)
+{
+       int i, j;
+       struct ieee80211_hw *hw = mvm->hw;
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct iwl_mvm_sta *sta_priv;
+       struct iwl_lq_sta *lq_sta;
+       struct ieee80211_supported_band *sband;
+       unsigned long supp; /* must be unsigned long for for_each_set_bit */
+
+       sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
+       lq_sta = &sta_priv->lq_sta;
+       sband = hw->wiphy->bands[band];
+
+       lq_sta->lq.sta_id = sta_priv->sta_id;
+
+       for (j = 0; j < LQ_SIZE; j++)
+               for (i = 0; i < IWL_RATE_COUNT; i++)
+                       rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
+       lq_sta->flush_timer = 0;
+       lq_sta->supp_rates = sta->supp_rates[sband->band];
+       for (j = 0; j < LQ_SIZE; j++)
+               for (i = 0; i < IWL_RATE_COUNT; i++)
+                       rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
+       IWL_DEBUG_RATE(mvm,
+                      "LQ: *** rate scale station global init for station %d ***\n",
+                      sta_priv->sta_id);
+       /* TODO: what is a good starting rate for STA? About middle? Maybe not
+        * the lowest or the highest rate.. Could consider using RSSI from
+        * previous packets? Need to have IEEE 802.1X auth succeed immediately
+        * after assoc.. */
+
+       lq_sta->max_rate_idx = -1;
+       lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+       lq_sta->is_green = rs_use_green(sta);
+       lq_sta->band = sband->band;
+       /*
+        * active legacy rates as per supported rates bitmap
+        */
+       supp = sta->supp_rates[sband->band];
+       lq_sta->active_legacy_rate = 0;
+       for_each_set_bit(i, &supp, BITS_PER_LONG)
+               lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
+       /*
+        * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+        * supp_rates[] does not; shift to convert format, force 9 MBits off.
+        */
+       lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+       lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+       lq_sta->active_siso_rate &= ~((u16)0x2);
+       lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+       /* Same here */
+       lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+       lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+       lq_sta->active_mimo2_rate &= ~((u16)0x2);
+       lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+       lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
+       lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
+       lq_sta->active_mimo3_rate &= ~((u16)0x2);
+       lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
+
+       IWL_DEBUG_RATE(mvm,
+                      "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+                      lq_sta->active_siso_rate,
+                      lq_sta->active_mimo2_rate,
+                      lq_sta->active_mimo3_rate);
+
+       /* These values will be overridden later */
+       lq_sta->lq.single_stream_ant_msk =
+               first_antenna(mvm->nvm_data->valid_tx_ant);
+       lq_sta->lq.dual_stream_ant_msk =
+               mvm->nvm_data->valid_tx_ant &
+               ~first_antenna(mvm->nvm_data->valid_tx_ant);
+       if (!lq_sta->lq.dual_stream_ant_msk) {
+               lq_sta->lq.dual_stream_ant_msk = ANT_AB;
+       } else if (num_of_ant(mvm->nvm_data->valid_tx_ant) == 2) {
+               lq_sta->lq.dual_stream_ant_msk =
+                       mvm->nvm_data->valid_tx_ant;
+       }
+
+       /* as default allow aggregation for all tids */
+       lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+       lq_sta->drv = mvm;
+
+       /* Set last_txrate_idx to lowest rate */
+       lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+       lq_sta->is_agg = 0;
+#ifdef CONFIG_MAC80211_DEBUGFS
+       lq_sta->dbg_fixed_rate = 0;
+#endif
+
+       rs_initialize_lq(mvm, sta, lq_sta, band);
+}
+
+static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+                            struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+       struct iwl_scale_tbl_info tbl_type;
+       int index = 0;
+       int rate_idx;
+       int repeat_rate = 0;
+       u8 ant_toggle_cnt = 0;
+       u8 use_ht_possible = 1;
+       u8 valid_tx_ant = 0;
+       struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+
+       /* Override starting rate (index 0) if needed for debug purposes */
+       rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+       /* Interpret new_rate (rate_n_flags) */
+       rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+                                &tbl_type, &rate_idx);
+
+       /* How many times should we repeat the initial rate? */
+       if (is_legacy(tbl_type.lq_type)) {
+               ant_toggle_cnt = 1;
+               repeat_rate = IWL_NUMBER_TRY;
+       } else {
+               repeat_rate = min(IWL_HT_NUMBER_TRY,
+                                 LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+       }
+
+       lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+       /* Fill 1st table entry (index 0) */
+       lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+
+       if (num_of_ant(tbl_type.ant_type) == 1)
+               lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
+       else if (num_of_ant(tbl_type.ant_type) == 2)
+               lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
+       /* otherwise we don't modify the existing value */
+
+       index++;
+       repeat_rate--;
+       if (mvm)
+               valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+
+       /* Fill rest of rate table */
+       while (index < LINK_QUAL_MAX_RETRY_NUM) {
+               /* Repeat initial/next rate.
+                * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+                * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+               while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+                       if (is_legacy(tbl_type.lq_type)) {
+                               if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                                       ant_toggle_cnt++;
+                               else if (mvm &&
+                                        rs_toggle_antenna(valid_tx_ant,
+                                                       &new_rate, &tbl_type))
+                                       ant_toggle_cnt = 1;
+                       }
+
+                       /* Override next rate if needed for debug purposes */
+                       rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+                       /* Fill next table entry */
+                       lq_cmd->rs_table[index] =
+                                       cpu_to_le32(new_rate);
+                       repeat_rate--;
+                       index++;
+               }
+
+               rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+                                        &rate_idx);
+
+
+               /* Indicate to uCode which entries might be MIMO.
+                * If initial rate was MIMO, this will finally end up
+                * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+               if (is_mimo(tbl_type.lq_type))
+                       lq_cmd->mimo_delim = index;
+
+               /* Get next rate */
+               new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+                                            use_ht_possible);
+
+               /* How many times should we repeat the next rate? */
+               if (is_legacy(tbl_type.lq_type)) {
+                       if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                               ant_toggle_cnt++;
+                       else if (mvm &&
+                                rs_toggle_antenna(valid_tx_ant,
+                                                  &new_rate, &tbl_type))
+                               ant_toggle_cnt = 1;
+
+                       repeat_rate = IWL_NUMBER_TRY;
+               } else {
+                       repeat_rate = IWL_HT_NUMBER_TRY;
+               }
+
+               /* Don't allow HT rates after next pass.
+                * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+               use_ht_possible = 0;
+
+               /* Override next rate if needed for debug purposes */
+               rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+               /* Fill next table entry */
+               lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+
+               index++;
+               repeat_rate--;
+       }
+
+       lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+       lq_cmd->agg_time_limit =
+               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+       return hw->priv;
+}
+/* rate scale requires free function to be implemented */
+static void rs_free(void *mvm_rate)
+{
+       return;
+}
+
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
+                       void *mvm_sta)
+{
+       struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
+       struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+       IWL_DEBUG_RATE(mvm, "enter\n");
+       IWL_DEBUG_RATE(mvm, "leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index)
+{
+       struct iwl_mvm *mvm;
+       u8 valid_tx_ant;
+       u8 ant_sel_tx;
+
+       mvm = lq_sta->drv;
+       valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+       if (lq_sta->dbg_fixed_rate) {
+               ant_sel_tx =
+                 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+                 >> RATE_MCS_ANT_POS);
+               if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+                       *rate_n_flags = lq_sta->dbg_fixed_rate;
+                       IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
+               } else {
+                       lq_sta->dbg_fixed_rate = 0;
+                       IWL_ERR(mvm,
+                               "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+                               ant_sel_tx, valid_tx_ant);
+                       IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+               }
+       } else {
+               IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+       }
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
+                       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_mvm *mvm;
+       char buf[64];
+       size_t buf_size;
+       u32 parsed_rate;
+
+
+       mvm = lq_sta->drv;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x", &parsed_rate) == 1)
+               lq_sta->dbg_fixed_rate = parsed_rate;
+       else
+               lq_sta->dbg_fixed_rate = 0;
+
+       rs_program_fix_rate(mvm, lq_sta);
+
+       return count;
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i = 0;
+       int index = 0;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_mvm *mvm;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+       mvm = lq_sta->drv;
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+       desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
+                       lq_sta->total_failed, lq_sta->total_success,
+                       lq_sta->active_legacy_rate);
+       desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+                       lq_sta->dbg_fixed_rate);
+       desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+           (mvm->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+           (mvm->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+           (mvm->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
+       desc += sprintf(buff+desc, "lq type %s\n",
+          (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+       if (is_Ht(tbl->lq_type)) {
+               desc += sprintf(buff+desc, " %s",
+                  (is_siso(tbl->lq_type)) ? "SISO" :
+                  ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+                  desc += sprintf(buff+desc, " %s",
+                  (tbl->is_ht40) ? "40MHz" : "20MHz");
+                  desc += sprintf(buff+desc, " %s %s %s\n",
+                                  (tbl->is_SGI) ? "SGI" : "",
+                  (lq_sta->is_green) ? "GF enabled" : "",
+                  (lq_sta->is_agg) ? "AGG on" : "");
+       }
+       desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+                       lq_sta->last_rate_n_flags);
+       desc += sprintf(buff+desc,
+                       "general: flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+                       lq_sta->lq.flags,
+                       lq_sta->lq.mimo_delim,
+                       lq_sta->lq.single_stream_ant_msk,
+                       lq_sta->lq.dual_stream_ant_msk);
+
+       desc += sprintf(buff+desc,
+                       "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+                       le16_to_cpu(lq_sta->lq.agg_time_limit),
+                       lq_sta->lq.agg_disable_start_th,
+                       lq_sta->lq.agg_frame_cnt_limit);
+
+       desc += sprintf(buff+desc,
+                       "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+                       lq_sta->lq.initial_rate_index[0],
+                       lq_sta->lq.initial_rate_index[1],
+                       lq_sta->lq.initial_rate_index[2],
+                       lq_sta->lq.initial_rate_index[3]);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               index = iwl_hwrate_to_plcp_idx(
+                       le32_to_cpu(lq_sta->lq.rs_table[i]));
+               if (is_legacy(tbl->lq_type)) {
+                       desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+                                       i, le32_to_cpu(lq_sta->lq.rs_table[i]),
+                                       iwl_rate_mcs[index].mbps);
+               } else {
+                       desc += sprintf(buff+desc,
+                                       " rate[%d] 0x%X %smbps (%s)\n",
+                                       i, le32_to_cpu(lq_sta->lq.rs_table[i]),
+                                       iwl_rate_mcs[index].mbps,
+                                       iwl_rate_mcs[index].mcs);
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+       .write = rs_sta_dbgfs_scale_table_write,
+       .read = rs_sta_dbgfs_scale_table_read,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i, j;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       for (i = 0; i < LQ_SIZE; i++) {
+               desc += sprintf(buff+desc,
+                               "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+                               "rate=0x%X\n",
+                               lq_sta->active_tbl == i ? "*" : "x",
+                               lq_sta->lq_info[i].lq_type,
+                               lq_sta->lq_info[i].is_SGI,
+                               lq_sta->lq_info[i].is_ht40,
+                               lq_sta->is_green,
+                               lq_sta->lq_info[i].current_rate);
+               for (j = 0; j < IWL_RATE_COUNT; j++) {
+                       desc += sprintf(buff+desc,
+                               "counter=%d success=%d %%=%d\n",
+                               lq_sta->lq_info[i].win[j].counter,
+                               lq_sta->lq_info[i].win[j].success_counter,
+                               lq_sta->lq_info[i].win[j].success_ratio);
+               }
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+       .read = rs_sta_dbgfs_stats_table_read,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+       char buff[120];
+       int desc = 0;
+
+       if (is_Ht(tbl->lq_type))
+               desc += sprintf(buff+desc,
+                               "Bit Rate= %d Mb/s\n",
+                               tbl->expected_tpt[lq_sta->last_txrate_idx]);
+       else
+               desc += sprintf(buff+desc,
+                               "Bit Rate= %d Mb/s\n",
+                               iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+       .read = rs_sta_dbgfs_rate_scale_data_read,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
+static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+{
+       struct iwl_lq_sta *lq_sta = mvm_sta;
+       lq_sta->rs_sta_dbgfs_scale_table_file =
+               debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+                                   lq_sta, &rs_sta_dbgfs_scale_table_ops);
+       lq_sta->rs_sta_dbgfs_stats_table_file =
+               debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+                                   lq_sta, &rs_sta_dbgfs_stats_table_ops);
+       lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+               debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+                                   lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+       lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+               debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+                                 &lq_sta->tx_agg_tid_en);
+}
+
+static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+{
+       struct iwl_lq_sta *lq_sta = mvm_sta;
+       debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void rs_rate_init_stub(void *mvm_r,
+                                struct ieee80211_supported_band *sband,
+                                struct ieee80211_sta *sta, void *mvm_sta)
+{
+}
+static struct rate_control_ops rs_mvm_ops = {
+       .module = NULL,
+       .name = RS_NAME,
+       .tx_status = rs_tx_status,
+       .get_rate = rs_get_rate,
+       .rate_init = rs_rate_init_stub,
+       .alloc = rs_alloc,
+       .free = rs_free,
+       .alloc_sta = rs_alloc_sta,
+       .free_sta = rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+       .add_sta_debugfs = rs_add_debugfs,
+       .remove_sta_debugfs = rs_remove_debugfs,
+#endif
+};
+
+int iwl_mvm_rate_control_register(void)
+{
+       return ieee80211_rate_control_register(&rs_mvm_ops);
+}
+
+void iwl_mvm_rate_control_unregister(void)
+{
+       ieee80211_rate_control_unregister(&rs_mvm_ops);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
new file mode 100644 (file)
index 0000000..219c685
--- /dev/null
@@ -0,0 +1,393 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __rs_h__
+#define __rs_h__
+
+#include <net/mac80211.h>
+
+#include "iwl-config.h"
+
+#include "fw-api.h"
+#include "iwl-trans.h"
+
+struct iwl_rs_rate_info {
+       u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+       u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+       u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
+       u8 ieee;        /* MAC header:  IWL_RATE_6M_IEEE, etc. */
+       u8 prev_ieee;    /* previous rate in IEEE speeds */
+       u8 next_ieee;    /* next rate in IEEE speeds */
+       u8 prev_rs;      /* previous rate used in rs algo */
+       u8 next_rs;      /* next rate used in rs algo */
+       u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;  /* next rate used in TGG rs algo */
+};
+
+#define IWL_RATE_60M_PLCP 3
+
+enum {
+       IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+       IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+enum {
+       IWL_RATE_6M_INDEX_TABLE = 0,
+       IWL_RATE_9M_INDEX_TABLE,
+       IWL_RATE_12M_INDEX_TABLE,
+       IWL_RATE_18M_INDEX_TABLE,
+       IWL_RATE_24M_INDEX_TABLE,
+       IWL_RATE_36M_INDEX_TABLE,
+       IWL_RATE_48M_INDEX_TABLE,
+       IWL_RATE_54M_INDEX_TABLE,
+       IWL_RATE_1M_INDEX_TABLE,
+       IWL_RATE_2M_INDEX_TABLE,
+       IWL_RATE_5M_INDEX_TABLE,
+       IWL_RATE_11M_INDEX_TABLE,
+       IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define        IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
+#define        IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
+#define        IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
+#define        IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
+#define        IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
+#define        IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
+#define        IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
+#define        IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
+#define        IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
+#define        IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
+#define        IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
+#define        IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
+
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+       IWL_RATE_SISO_6M_PLCP = 0,
+       IWL_RATE_SISO_12M_PLCP = 1,
+       IWL_RATE_SISO_18M_PLCP = 2,
+       IWL_RATE_SISO_24M_PLCP = 3,
+       IWL_RATE_SISO_36M_PLCP = 4,
+       IWL_RATE_SISO_48M_PLCP = 5,
+       IWL_RATE_SISO_54M_PLCP = 6,
+       IWL_RATE_SISO_60M_PLCP = 7,
+       IWL_RATE_MIMO2_6M_PLCP  = 0x8,
+       IWL_RATE_MIMO2_12M_PLCP = 0x9,
+       IWL_RATE_MIMO2_18M_PLCP = 0xa,
+       IWL_RATE_MIMO2_24M_PLCP = 0xb,
+       IWL_RATE_MIMO2_36M_PLCP = 0xc,
+       IWL_RATE_MIMO2_48M_PLCP = 0xd,
+       IWL_RATE_MIMO2_54M_PLCP = 0xe,
+       IWL_RATE_MIMO2_60M_PLCP = 0xf,
+       IWL_RATE_MIMO3_6M_PLCP  = 0x10,
+       IWL_RATE_MIMO3_12M_PLCP = 0x11,
+       IWL_RATE_MIMO3_18M_PLCP = 0x12,
+       IWL_RATE_MIMO3_24M_PLCP = 0x13,
+       IWL_RATE_MIMO3_36M_PLCP = 0x14,
+       IWL_RATE_MIMO3_48M_PLCP = 0x15,
+       IWL_RATE_MIMO3_54M_PLCP = 0x16,
+       IWL_RATE_MIMO3_60M_PLCP = 0x17,
+       IWL_RATE_SISO_INVM_PLCP,
+       IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+       IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+       IWL_RATE_6M_IEEE  = 12,
+       IWL_RATE_9M_IEEE  = 18,
+       IWL_RATE_12M_IEEE = 24,
+       IWL_RATE_18M_IEEE = 36,
+       IWL_RATE_24M_IEEE = 48,
+       IWL_RATE_36M_IEEE = 72,
+       IWL_RATE_48M_IEEE = 96,
+       IWL_RATE_54M_IEEE = 108,
+       IWL_RATE_60M_IEEE = 120,
+       IWL_RATE_1M_IEEE  = 2,
+       IWL_RATE_2M_IEEE  = 4,
+       IWL_RATE_5M_IEEE  = 11,
+       IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+
+#define IWL_INVALID_VALUE    -1
+
+#define IWL_MIN_RSSI_VAL                 -100
+#define IWL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT       160
+#define IWL_LEGACY_SUCCESS_LIMIT       480
+#define IWL_LEGACY_TABLE_COUNT         160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT  400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT  4500
+#define IWL_NONE_LEGACY_TABLE_COUNT    1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO              12800   /* 100% */
+#define IWL_RATE_SCALE_SWITCH          10880   /*  85% */
+#define IWL_RATE_HIGH_TH               10880   /*  85% */
+#define IWL_RATE_INCREASE_TH           6400    /*  50% */
+#define IWL_RATE_DECREASE_TH           1920    /*  15% */
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1      0
+#define IWL_LEGACY_SWITCH_ANTENNA2      1
+#define IWL_LEGACY_SWITCH_SISO          2
+#define IWL_LEGACY_SWITCH_MIMO2_AB      3
+#define IWL_LEGACY_SWITCH_MIMO2_AC      4
+#define IWL_LEGACY_SWITCH_MIMO2_BC      5
+#define IWL_LEGACY_SWITCH_MIMO3_ABC     6
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1        0
+#define IWL_SISO_SWITCH_ANTENNA2        1
+#define IWL_SISO_SWITCH_MIMO2_AB        2
+#define IWL_SISO_SWITCH_MIMO2_AC        3
+#define IWL_SISO_SWITCH_MIMO2_BC        4
+#define IWL_SISO_SWITCH_GI              5
+#define IWL_SISO_SWITCH_MIMO3_ABC       6
+
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1       0
+#define IWL_MIMO2_SWITCH_ANTENNA2       1
+#define IWL_MIMO2_SWITCH_SISO_A         2
+#define IWL_MIMO2_SWITCH_SISO_B         3
+#define IWL_MIMO2_SWITCH_SISO_C         4
+#define IWL_MIMO2_SWITCH_GI             5
+#define IWL_MIMO2_SWITCH_MIMO3_ABC      6
+
+
+/* possible actions when in mimo3 mode */
+#define IWL_MIMO3_SWITCH_ANTENNA1       0
+#define IWL_MIMO3_SWITCH_ANTENNA2       1
+#define IWL_MIMO3_SWITCH_SISO_A         2
+#define IWL_MIMO3_SWITCH_SISO_B         3
+#define IWL_MIMO3_SWITCH_SISO_C         4
+#define IWL_MIMO3_SWITCH_MIMO2_AB       5
+#define IWL_MIMO3_SWITCH_MIMO2_AC       6
+#define IWL_MIMO3_SWITCH_MIMO2_BC       7
+#define IWL_MIMO3_SWITCH_GI             8
+
+
+#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
+
+/*FIXME:RS:add possible actions for MIMO3*/
+
+#define IWL_ACTION_LIMIT               3       /* # possible actions */
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX   (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN   (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF        (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX        (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN        (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF  (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX  (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN  (0)
+
+#define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD   0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID                0xff
+#define TID_QUEUE_CELL_SPACING 50      /*mS */
+#define TID_QUEUE_MAX_SIZE     20
+#define TID_ROUND_VALUE                5       /* mS */
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
+
+enum iwl_table_type {
+       LQ_NONE,
+       LQ_G,           /* legacy types */
+       LQ_A,
+       LQ_SISO,        /* high-throughput types */
+       LQ_MIMO2,
+       LQ_MIMO3,
+       LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
+#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE       12
+
+struct iwl_rate_mcs_info {
+       char    mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+       char    mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+       u64 data;               /* bitmap of successful frames */
+       s32 success_counter;    /* number of frames successful */
+       s32 success_ratio;      /* per-cent * 128  */
+       s32 counter;            /* number of frames attempted */
+       s32 average_tpt;        /* success ratio * expected throughput */
+       unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+       enum iwl_table_type lq_type;
+       u8 ant_type;
+       u8 is_SGI;      /* 1 = short guard interval */
+       u8 is_ht40;     /* 1 = 40 MHz channel width */
+       u8 action;      /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+       u8 max_search;  /* maximum number of tables we can search */
+       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
+       u32 current_rate;  /* rate_n_flags, uCode API format */
+       struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+       unsigned long time_stamp;       /* age of the oldest statistics */
+       u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+                                                * slice */
+       u32 total;                      /* total num of packets during the
+                                        * last TID_MAX_TIME_DIFF */
+       u8 queue_count;                 /* number of queues that have
+                                        * been used since the last cleanup */
+       u8 head;                        /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+       u8 active_tbl;          /* index of active table, range 0-1 */
+       u8 enable_counter;      /* indicates HT mode */
+       u8 stay_in_tbl;         /* 1: disallow, 0: allow search for new mode */
+       u8 search_better_tbl;   /* 1: currently trying alternate mode */
+       s32 last_tpt;
+
+       /* The following determine when to search for a new mode */
+       u32 table_count_limit;
+       u32 max_failure_limit;  /* # failed frames before new search */
+       u32 max_success_limit;  /* # successful frames before new search */
+       u32 table_count;
+       u32 total_failed;       /* total failed frames, any/all rates */
+       u32 total_success;      /* total successful frames, any/all rates */
+       u64 flush_timer;        /* time staying in mode before new search */
+
+       u8 action_counter;      /* # mode-switch actions tried */
+       u8 is_green;
+       enum ieee80211_band band;
+
+       /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+       u32 supp_rates;
+       u16 active_legacy_rate;
+       u16 active_siso_rate;
+       u16 active_mimo2_rate;
+       u16 active_mimo3_rate;
+       s8 max_rate_idx;     /* Max rate set by user */
+       u8 missed_rate_counter;
+
+       struct iwl_lq_cmd lq;
+       struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+       struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
+       u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+       struct dentry *rs_sta_dbgfs_scale_table_file;
+       struct dentry *rs_sta_dbgfs_stats_table_file;
+       struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+       struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+       u32 dbg_fixed_rate;
+#endif
+       struct iwl_mvm *drv;
+
+       /* used to be in sta_info */
+       int last_txrate_idx;
+       /* last tx rate_n_flags */
+       u32 last_rate_n_flags;
+       /* packets destined for this STA are aggregated */
+       u8 is_agg;
+       /* BT traffic this sta was last updated in */
+       u8 last_bt_traffic;
+};
+
+static inline u8 num_of_ant(u8 mask)
+{
+       return  !!((mask) & ANT_A) +
+               !!((mask) & ANT_B) +
+               !!((mask) & ANT_C);
+}
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
+                                struct ieee80211_sta *sta,
+                                enum ieee80211_band band);
+
+/**
+ * iwl_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand alone module.  The driver can call
+ * iwl_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw
+ *
+ */
+extern int iwl_mvm_rate_control_register(void);
+
+/**
+ * iwl_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void iwl_mvm_rate_control_unregister(void);
+
+#endif /* __rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
new file mode 100644 (file)
index 0000000..3f3ce91
--- /dev/null
@@ -0,0 +1,355 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include "iwl-trans.h"
+
+#include "mvm.h"
+#include "fw-api.h"
+
+/*
+ * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
+ *
+ * Stashes the PHY information into mvm->last_phy_info; it will be used
+ * when the actual frame data arrives from the fw in the next packet.
+ */
+int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       /* a new PHY response starts a new A-MPDU reference */
+       mvm->ampdu_ref++;
+       memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+       return 0;
+}
+
+/*
+ * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
+ *
+ * Adds the rxb to a new skb and give it to mac80211. The rx buffer's
+ * page is stolen when a fragment is attached, so ownership of the page
+ * moves to the skb.
+ */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+                                           struct ieee80211_hdr *hdr, u16 len,
+                                           u32 ampdu_status,
+                                           struct iwl_rx_cmd_buffer *rxb,
+                                           struct ieee80211_rx_status *stats)
+{
+       struct sk_buff *skb;
+       unsigned int hdrlen, fraglen;
+
+       /* Don't use dev_alloc_skb(), we'll have enough headroom once
+        * ieee80211_hdr pulled.
+        */
+       skb = alloc_skb(128, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(mvm, "alloc_skb failed\n");
+               return;
+       }
+       /* If frame is small enough to fit in skb->head, pull it completely.
+        * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
+        * are more efficient.
+        */
+       hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
+
+       memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+       fraglen = len - hdrlen;
+
+       if (fraglen) {
+               /* offset of the remainder within the (stolen) rx page */
+               int offset = (void *)hdr + hdrlen -
+                            rxb_addr(rxb) + rxb_offset(rxb);
+
+               skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+                               fraglen, rxb->truesize);
+       }
+
+       /* hand the rx status to mac80211 via the skb control buffer */
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx_ni(mvm->hw, skb);
+}
+
+/*
+ * iwl_mvm_calc_rssi - calculate the rssi in dBm
+ * @mvm: the mvm object
+ * @phy_info: the phy information for the coming packet
+ *
+ * Returns the signal strength in dBm (a negative value).
+ */
+static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+                            struct iwl_rx_phy_info *phy_info)
+{
+       u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
+       u32 val;
+
+       /* Find max rssi among 3 possible receivers.
+        * These values are measured by the Digital Signal Processor (DSP).
+        * They should stay fairly constant even as the signal strength varies,
+        * if the radio's Automatic Gain Control (AGC) is working right.
+        * AGC value (see below) will provide the "interesting" info.
+        */
+       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
+       rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
+       rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
+       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
+       rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
+
+       /* AGC gain the radio applied while receiving this frame */
+       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
+       agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
+
+       max_rssi = max_t(u32, rssi_a, rssi_b);
+       max_rssi = max_t(u32, max_rssi, rssi_c);
+
+       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+                       rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
+
+       /* dBm = max_rssi dB - agc dB - constant.
+        * Higher AGC (higher radio gain) means lower signal. */
+       return max_rssi - agc_db - IWL_RSSI_OFFSET;
+}
+
+/*
+ * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
+ * @mvm: the mvm object
+ * @hdr: 80211 header
+ * @stats: status in mac80211's format
+ * @rx_pkt_status: status coming from fw
+ *
+ * returns non 0 value if the packet should be dropped
+ * (note: -1 is returned through a u32 - callers must only test non-zero)
+ */
+static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
+                                       struct ieee80211_hdr *hdr,
+                                       struct ieee80211_rx_status *stats,
+                                       u32 rx_pkt_status)
+{
+       /* unprotected frames need no crypto translation */
+       if (!ieee80211_has_protected(hdr->frame_control) ||
+           (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+                            RX_MPDU_RES_STATUS_SEC_NO_ENC)
+               return 0;
+
+       /* packet was encrypted with unknown alg; not dropped here -
+        * left for mac80211/SW to deal with */
+       if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+                                       RX_MPDU_RES_STATUS_SEC_ENC_ERR)
+               return 0;
+
+       switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+       case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
+               /* alg is CCM: check MIC only */
+               if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+                       return -1;
+
+               stats->flag |= RX_FLAG_DECRYPTED;
+               IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
+               return 0;
+
+       case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
+               /* Don't drop the frame and decrypt it in SW */
+               if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+                       return 0;
+               /* fall through if TTAK OK */
+
+       case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
+               /* WEP (or TKIP with good TTAK): check the ICV */
+               if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
+                       return -1;
+
+               stats->flag |= RX_FLAG_DECRYPTED;
+               return 0;
+
+       default:
+               IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+       }
+
+       return 0;
+}
+
+/*
+ * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw. The PHY info for
+ * this frame arrived earlier and was saved in mvm->last_phy_info by
+ * iwl_mvm_rx_rx_phy_cmd().
+ */
+int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                      struct iwl_device_cmd *cmd)
+{
+       struct ieee80211_hdr *hdr;
+       /* zero-initialized here, no memset() needed below */
+       struct ieee80211_rx_status rx_status = {};
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_rx_phy_info *phy_info;
+       struct iwl_rx_mpdu_res_start *rx_res;
+       u32 len;
+       /* the fw doesn't report an A-MPDU status in this flow; keep it
+        * zeroed instead of passing an uninitialized value down */
+       u32 ampdu_status = 0;
+       u32 rate_n_flags;
+       u32 rx_pkt_status;
+
+       phy_info = &mvm->last_phy_info;
+       rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+       hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+       len = le16_to_cpu(rx_res->byte_count);
+       /* the 32-bit rx status word follows the frame payload */
+       rx_pkt_status = le32_to_cpup((__le32 *)
+               (pkt->data + sizeof(*rx_res) + len));
+
+       /*
+        * drop the packet if it has failed being decrypted by HW
+        */
+       if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) {
+               IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+                              rx_pkt_status);
+               return 0;
+       }
+
+       if (unlikely(phy_info->cfg_phy_cnt > 20)) {
+               IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
+                              phy_info->cfg_phy_cnt);
+               return 0;
+       }
+
+       if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+           !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+               IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+               return 0;
+       }
+
+       /* This will be used in several places later */
+       rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+       /* rx_status carries information about the packet to mac80211 */
+       rx_status.mactime = le64_to_cpu(phy_info->timestamp);
+       rx_status.band =
+               (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
+                                              rx_status.band);
+       /*
+        * TSF as indicated by the fw is at INA time, but mac80211 expects the
+        * TSF at the beginning of the MPDU.
+        */
+       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+       rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+
+       IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
+                             (unsigned long long)rx_status.mactime);
+
+       /*
+        * "antenna number"
+        *
+        * It seems that the antenna field in the phy flags value
+        * is actually a bit field. This is undefined by radiotap,
+        * it wants an actual antenna number but I always get "7"
+        * for most legacy frames I receive indicating that the
+        * same frame was received on all three RX chains.
+        *
+        * I think this field should be removed in favor of a
+        * new 802.11n radiotap field "RX chains" that is defined
+        * as a bitmask.
+        */
+       rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
+                               RX_RES_PHY_FLAGS_ANTENNA)
+                               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+       /* set the preamble flag if appropriate */
+       if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+               /*
+                * We know which subframes of an A-MPDU belong
+                * together since we get a single PHY response
+                * from the firmware for all of them
+                */
+               rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status.ampdu_reference = mvm->ampdu_ref;
+       }
+
+       /* Set up the HT phy flags */
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rx_status.flag |= RX_FLAG_40MHZ;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rx_status.flag |= RX_FLAG_80MHZ;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rx_status.flag |= RX_FLAG_160MHZ;
+               break;
+       }
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               rx_status.flag |= RX_FLAG_SHORT_GI;
+       if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+               rx_status.flag |= RX_FLAG_HT_GF;
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               rx_status.flag |= RX_FLAG_HT;
+               rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               rx_status.vht_nss =
+                       ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+                                               RATE_VHT_MCS_NSS_POS) + 1;
+               rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+               rx_status.flag |= RX_FLAG_VHT;
+       } else {
+               rx_status.rate_idx =
+                       iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+                                                           rx_status.band);
+       }
+
+       iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
+                                       rxb, &rx_status);
+       return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
new file mode 100644 (file)
index 0000000..406c53a
--- /dev/null
@@ -0,0 +1,437 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "fw-api-scan.h"
+
+#define IWL_PLCP_QUIET_THRESH 1
+#define IWL_ACTIVE_QUIET_TIME 10
+
+/* Build the rx chain selection for scan: all valid rx antennas are
+ * forced on, with the driver-force bit set. */
+static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
+{
+       u8 valid_ant = mvm->nvm_data->valid_rx_ant;
+       u16 flags = 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
+
+       flags |= valid_ant << PHY_RX_CHAIN_VALID_POS;
+       flags |= valid_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+       flags |= valid_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
+
+       return cpu_to_le16(flags);
+}
+
+/* Max time to be out of the operating channel: bounded (200 TU) only
+ * when associated, unlimited (0) otherwise. */
+static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
+{
+       return vif->bss_conf.assoc ? cpu_to_le32(200 * 1024) : 0;
+}
+
+/* Suspend time between scan iterations: one beacon interval when
+ * associated (so we don't miss beacons), 0 otherwise. */
+static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
+{
+       return vif->bss_conf.assoc ?
+               cpu_to_le32(vif->bss_conf.beacon_int) : 0;
+}
+
+/* Pick the RXON band flag based on the band of the first requested
+ * channel (the request never mixes bands in one command). */
+static inline __le32
+iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
+{
+       bool band_24 = req->channels[0]->band == IEEE80211_BAND_2GHZ;
+
+       return cpu_to_le32(band_24 ? PHY_BAND_24 : PHY_BAND_5);
+}
+
+/* Choose the TX rate for probe requests: 1 Mbps CCK on 2.4 GHz (unless
+ * CCK is disallowed), 6 Mbps OFDM otherwise; the tx antenna is rotated
+ * via iwl_mvm_next_antenna() on every call. */
+static inline __le32
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+                         bool no_cck)
+{
+       u32 tx_ant;
+       u32 rate;
+
+       mvm->scan_last_antenna_idx =
+               iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+                                    mvm->scan_last_antenna_idx);
+       tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+       if (band == IEEE80211_BAND_2GHZ && !no_cck)
+               rate = IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK;
+       else
+               rate = IWL_RATE_6M_PLCP;
+
+       return cpu_to_le32(rate | tx_ant);
+}
+
+/*
+ * We insert the SSIDs in an inverted order, because the FW will
+ * invert it back. The most prioritized SSID, which is first in the
+ * request list, is not copied here, but inserted directly to the probe
+ * request.
+ */
+static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
+                                   struct cfg80211_scan_request *req)
+{
+       int fw_idx, req_idx;
+
+       /* fw_idx must advance with each SSID copied; without the
+        * increment every entry would overwrite direct_scan[0] */
+       for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+            req_idx--, fw_idx++) {
+               cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
+               cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
+               memcpy(cmd->direct_scan[fw_idx].ssid,
+                      req->ssids[req_idx].ssid,
+                      req->ssids[req_idx].ssid_len);
+       }
+}
+
+/*
+ * If req->n_ssids > 0, it means we should do an active scan.
+ * In case of active scan w/o directed scan, we receive a zero-length SSID
+ * just to notify that this scan is active and not passive.
+ * In order to notify the FW of the number of SSIDs we wish to scan (including
+ * the zero-length one), we need to set the corresponding bits in chan->type,
+ * one for each SSID, and set the active bit (first).
+ */
+/* Active-scan dwell time: larger base on 2.4 GHz, growing with the
+ * number of probed SSIDs (including the broadcast probe). */
+static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+{
+       u16 base = (band == IEEE80211_BAND_2GHZ) ? 30 : 20;
+       u16 per_probe = (band == IEEE80211_BAND_2GHZ) ? 3 : 2;
+
+       return base + per_probe * (n_ssids + 1);
+}
+
+/* Passive-scan dwell time: 120 on 2.4 GHz, 110 on 5 GHz. */
+static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+{
+       if (band == IEEE80211_BAND_2GHZ)
+               return 100 + 20;
+       return 100 + 10;
+}
+
+static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
+                                      struct cfg80211_scan_request *req)
+{
+       u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
+       u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
+                                                   req->n_ssids);
+       /* the channel array starts right after the probe request, so
+        * cmd->tx_cmd.len must already be set by the caller */
+       struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
+               (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
+       int i;
+       __le32 chan_type_value;
+
+       /* one bit per SSID to probe plus the active bit (see the comment
+        * above iwl_mvm_get_active_dwell) */
+       if (req->n_ssids > 0)
+               chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
+       else
+               chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
+
+       for (i = 0; i < cmd->channel_count; i++) {
+               chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+               /* regulatory passive-only channels stay passive even in
+                * an active scan */
+               if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+                       chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
+               else
+                       chan->type = chan_type_value;
+               chan->active_dwell = cpu_to_le16(active_dwell);
+               chan->passive_dwell = cpu_to_le16(passive_dwell);
+               chan->iteration_count = cpu_to_le16(1);
+               chan++;
+       }
+}
+
+/*
+ * Fill in probe request with the following parameters:
+ * TA is our vif HW address, which mac80211 ensures we have.
+ * Packet is broadcasted, so this is both SA and DA.
+ * The probe request IE is made out of two: first comes the most prioritized
+ * SSID if a directed scan is requested. Second comes whatever extra
+ * information was given to us as the scan request IE.
+ *
+ * Returns the total frame length, or 0 if the frame doesn't fit in
+ * @left bytes.
+ */
+static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
+                                 int n_ssids, const u8 *ssid, int ssid_len,
+                                 const u8 *ie, int ie_len,
+                                 int left)
+{
+       int len = 0;
+       u8 *pos = NULL;
+
+       /* Make sure there is enough space for the probe request,
+        * two mandatory IEs and the data */
+       left -= 24;
+       if (left < 0)
+               return 0;
+
+       /* IEEE80211_FTYPE_MGMT is 0, so the stype alone yields a valid
+        * management/probe-request frame control field */
+       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+       eth_broadcast_addr(frame->da);
+       memcpy(frame->sa, ta, ETH_ALEN);
+       eth_broadcast_addr(frame->bssid);
+       frame->seq_ctrl = 0;
+
+       /* 24 = size of the 802.11 management header filled above */
+       len += 24;
+
+       /* for passive scans, no need to fill anything */
+       if (n_ssids == 0)
+               return (u16)len;
+
+       /* points to the payload of the request */
+       pos = &frame->u.probe_req.variable[0];
+
+       /* fill in our SSID IE */
+       left -= ssid_len + 2;
+       if (left < 0)
+               return 0;
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = ssid_len;
+       if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
+               memcpy(pos, ssid, ssid_len);
+               pos += ssid_len;
+       }
+
+       len += ssid_len + 2;
+
+       /* the extra IEs don't fit - send the frame without them */
+       if (WARN_ON(left < ie_len))
+               return len;
+
+       if (ie && ie_len) {
+               memcpy(pos, ie, ie_len);
+               len += ie_len;
+       }
+
+       return (u16)len;
+}
+
+/*
+ * iwl_mvm_scan_request - build and send SCAN_REQUEST_CMD for a mac80211 scan
+ *
+ * Must be called with mvm->mutex held. Uses the pre-allocated
+ * mvm->scan_cmd buffer (sent NOCOPY, so it must stay valid until the
+ * command completes). Returns 0 on success, -EIO if the fw refused or
+ * failed the request.
+ */
+int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+                        struct ieee80211_vif *vif,
+                        struct cfg80211_scan_request *req)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = SCAN_REQUEST_CMD,
+               .len = { 0, },
+               .data = { mvm->scan_cmd, },
+               .flags = CMD_SYNC,
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       struct iwl_scan_cmd *cmd = mvm->scan_cmd;
+       int ret;
+       u32 status;
+       int ssid_len = 0;
+       u8 *ssid = NULL;
+
+       lockdep_assert_held(&mvm->mutex);
+       BUG_ON(mvm->scan_cmd == NULL);
+
+       IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
+       mvm->scan_status = IWL_MVM_SCAN_OS;
+       /* clear the whole buffer: header + max-size probe request + the
+        * maximum channel array */
+       memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
+              mvm->fw->ucode_capa.max_probe_length +
+              (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
+
+       cmd->channel_count = (u8)req->n_channels;
+       cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+       cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+       cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
+       cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
+       cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
+       cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
+       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+                                       MAC_FILTER_IN_BEACON);
+       cmd->type = SCAN_TYPE_FORCED;
+       cmd->repeats = cpu_to_le32(1);
+
+       /*
+        * If the user asked for passive scan, don't change to active scan if
+        * you see any activity on the channel - remain passive.
+        */
+       if (req->n_ssids > 0) {
+               cmd->passive2active = cpu_to_le16(1);
+               /* the first (most prioritized) SSID goes straight into the
+                * probe request; the rest go into direct_scan below */
+               ssid = req->ssids[0].ssid;
+               ssid_len = req->ssids[0].ssid_len;
+       } else {
+               cmd->passive2active = 0;
+       }
+
+       iwl_mvm_scan_fill_ssids(cmd, req);
+
+       cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+       cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
+       cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+       cmd->tx_cmd.rate_n_flags =
+                       iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
+                                                 req->no_cck);
+
+       /* tx_cmd.len must be set before iwl_mvm_scan_fill_channels(),
+        * which places the channel array right after the probe request */
+       cmd->tx_cmd.len =
+               cpu_to_le16(iwl_mvm_fill_probe_req(
+                           (struct ieee80211_mgmt *)cmd->data,
+                           vif->addr,
+                           req->n_ssids, ssid, ssid_len,
+                           req->ie, req->ie_len,
+                           mvm->fw->ucode_capa.max_probe_length));
+
+       iwl_mvm_scan_fill_channels(cmd, req);
+
+       cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
+               le16_to_cpu(cmd->tx_cmd.len) +
+               (cmd->channel_count * sizeof(struct iwl_scan_channel)));
+       hcmd.len[0] = le16_to_cpu(cmd->len);
+
+       status = SCAN_RESPONSE_OK;
+       ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
+       if (!ret && status == SCAN_RESPONSE_OK) {
+               IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+       } else {
+               /*
+                * If the scan failed, it usually means that the FW was unable
+                * to allocate the time events. Warn on it, but maybe we
+                * should try to send the command again with different params.
+                */
+               IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
+                       status, ret);
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               ret = -EIO;
+       }
+       return ret;
+}
+
+/* SCAN_REQUEST_CMD response handler: only logs the fw status. */
+int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                            struct iwl_device_cmd *cmd)
+{
+       struct iwl_cmd_response *resp =
+               (void *)((struct iwl_rx_packet *)rxb_addr(rxb))->data;
+
+       IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
+                      le32_to_cpu(resp->status));
+       return 0;
+}
+
+/* SCAN_COMPLETE_NOTIFICATION handler: clear the scan state and report
+ * completion (or abort) to mac80211. */
+int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                            struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+       bool aborted = notif->status != SCAN_COMP_STATUS_OK;
+
+       IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
+                      notif->status, notif->scanned_channels);
+
+       mvm->scan_status = IWL_MVM_SCAN_NONE;
+       ieee80211_scan_completed(mvm->hw, aborted);
+
+       return 0;
+}
+
+/*
+ * iwl_mvm_scan_abort_notif - notif_wait handler for scan abort
+ *
+ * Returns true when the wait can end (abort was refused, or the scan
+ * completed), false to keep waiting for the completion notification.
+ */
+static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
+                                    struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_mvm *mvm =
+               container_of(notif_wait, struct iwl_mvm, notif_wait);
+       struct iwl_scan_complete_notif *notif;
+       u32 *resp;
+
+       switch (pkt->hdr.cmd) {
+       case SCAN_ABORT_CMD:
+               /* NOTE(review): *resp is fw (little-endian) data compared in
+                * host order - confirm behavior on big-endian targets */
+               resp = (void *)pkt->data;
+               if (*resp == CAN_ABORT_STATUS) {
+                       IWL_DEBUG_SCAN(mvm,
+                                      "Scan can be aborted, wait until completion\n");
+                       return false;
+               }
+
+               IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
+                              *resp);
+               return true;
+
+       case SCAN_COMPLETE_NOTIFICATION:
+               notif = (void *)pkt->data;
+               IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
+                              notif->status);
+               return true;
+
+       default:
+               WARN_ON(1);
+               return false;
+       }
+}
+
+/*
+ * iwl_mvm_cancel_scan - synchronously abort an ongoing scan
+ *
+ * Sends SCAN_ABORT_CMD and waits (up to 1 second) for either the abort
+ * response or the scan-complete notification, as decided by
+ * iwl_mvm_scan_abort_notif().
+ */
+void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+{
+       struct iwl_notification_wait wait_scan_abort;
+       static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
+                                              SCAN_COMPLETE_NOTIFICATION };
+       int ret;
+
+       /* register the waiter before sending the command so the
+        * notification cannot be missed */
+       iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
+                                  scan_abort_notif,
+                                  ARRAY_SIZE(scan_abort_notif),
+                                  iwl_mvm_scan_abort_notif, NULL);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+       if (ret) {
+               IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+               goto out_remove_notif;
+       }
+
+       ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
+       if (ret)
+               IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
+
+       return;
+
+out_remove_notif:
+       /* the command was never sent - drop the waiter ourselves */
+       iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
new file mode 100644 (file)
index 0000000..a1eb692
--- /dev/null
@@ -0,0 +1,1235 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "sta.h"
+
+/*
+ * Find the first firmware station id with no mac80211 station mapped to
+ * it. Returns IWL_MVM_STATION_COUNT when the table is full. Must not be
+ * called during HW restart, where existing ids are reused.
+ */
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+{
+       int sta_id;
+
+       WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+       for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+               if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                              lockdep_is_held(&mvm->mutex)))
+                       return sta_id;
+       return IWL_MVM_STATION_COUNT;
+}
+
+/*
+ * Send the station add/update (ADD_STA) command to the firmware.
+ *
+ * @update: false to add a new station (TFD queue mask and MAC address are
+ *          included), true to modify an existing one.
+ * Returns 0 on success, -EIO if the firmware rejected the command, or the
+ * error from sending the command.
+ */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                          bool update)
+{
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       struct iwl_mvm_add_sta_cmd add_sta_cmd;
+       int ret;
+       u32 status;
+       u32 agg_size = 0, mpdu_dens = 0;
+
+       memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+       add_sta_cmd.sta_id = mvm_sta->sta_id;
+       add_sta_cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+       if (!update) {
+               add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+               memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+       }
+       add_sta_cmd.add_modify = update ? 1 : 0;
+
+       /* STA_FLG_FAT_EN_MSK ? */
+       /* STA_FLG_MIMO_EN_MSK ? */
+
+       /* A-MPDU density comes from the peer's HT capabilities */
+       if (sta->ht_cap.ht_supported) {
+               add_sta_cmd.station_flags_msk |=
+                       cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
+                                   STA_FLG_AGG_MPDU_DENS_MSK);
+
+               mpdu_dens = sta->ht_cap.ampdu_density;
+       }
+
+       /* max A-MPDU size: prefer the VHT exponent, fall back to HT factor */
+       if (sta->vht_cap.vht_supported) {
+               agg_size = sta->vht_cap.cap &
+                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+               agg_size >>=
+                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+       } else if (sta->ht_cap.ht_supported) {
+               agg_size = sta->ht_cap.ampdu_factor;
+       }
+
+       add_sta_cmd.station_flags |=
+               cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+       add_sta_cmd.station_flags |=
+               cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+
+       status = ADD_STA_SUCCESS;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+                                         &add_sta_cmd, &status);
+       if (ret)
+               return ret;
+
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "ADD_STA failed\n");
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Add a new mac80211 station: allocate a firmware station id (reused on
+ * HW restart), initialize the driver-side state and send the station to
+ * the firmware. Returns -ENOSPC if no station id is free.
+ */
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+                   struct ieee80211_vif *vif,
+                   struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       int i, ret, sta_id;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               sta_id = iwl_mvm_find_free_sta_id(mvm);
+       else
+               sta_id = mvm_sta->sta_id;
+
+       if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+               return -ENOSPC;
+
+       spin_lock_init(&mvm_sta->lock);
+
+       mvm_sta->sta_id = sta_id;
+       mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                     mvmvif->color);
+       mvm_sta->vif = vif;
+       mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+       /* HW restart, don't assume the memory has been zeroed */
+       atomic_set(&mvm_sta->pending_frames, 0);
+       mvm_sta->tid_disable_agg = 0;
+       mvm_sta->tfd_queue_msk = 0;
+       /* collect all valid AC hw queues plus the multicast queue */
+       for (i = 0; i < IEEE80211_NUM_ACS; i++)
+               if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+                       mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+
+       if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+               mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
+
+       /* for HW restart - need to reset the seq_number etc... */
+       memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+
+       ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+       if (ret)
+               return ret;
+
+       /* The first station added is the AP, the others are TDLS STAs */
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+               mvmvif->ap_sta_id = sta_id;
+
+       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+       return 0;
+}
+
+/* Modify an already added station in the firmware (queues/address kept) */
+int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
+{
+       return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+}
+
+/*
+ * Ask the firmware to start (@drain = true) or stop draining the frames
+ * pending for @mvmsta, by toggling STA_FLG_DRAIN_FLOW via ADD_STA.
+ */
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                     bool drain)
+{
+       struct iwl_mvm_add_sta_cmd cmd = {};
+       int ret;
+       u32 status;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+       cmd.sta_id = mvmsta->sta_id;
+       cmd.add_modify = STA_MODE_MODIFY;
+       cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+       cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+
+       status = ADD_STA_SUCCESS;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
+       if (ret)
+               return ret;
+
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
+                              mvmsta->sta_id);
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
+                       mvmsta->sta_id);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station validate that the station is indeed known to the driver (sanity
+ * only).
+ *
+ * Returns 0 on success, -EINVAL for an unknown or internal station, or
+ * the error from sending REMOVE_STA.
+ */
+static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+               .sta_id = sta_id,
+       };
+       int ret;
+
+       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+
+       /* Note: internal stations are marked as error values */
+       if (!sta) {
+               IWL_ERR(mvm, "Invalid station id\n");
+               return -EINVAL;
+       }
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC,
+                                  sizeof(rm_sta_cmd), &rm_sta_cmd);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Worker that finishes removing stations marked as busy (-EBUSY) by
+ * iwl_mvm_rm_sta() once the firmware has drained their pending frames.
+ */
+void iwl_mvm_sta_drained_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
+       u8 sta_id;
+
+       /*
+        * The mutex is needed because of the SYNC cmd, but not only: if the
+        * work would run concurrently with iwl_mvm_rm_sta, it would run before
+        * iwl_mvm_rm_sta sets the station as busy, and exit. Then
+        * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
+        * that later.
+        */
+       mutex_lock(&mvm->mutex);
+
+       for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
+               int ret;
+               struct ieee80211_sta *sta =
+                       rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                                 lockdep_is_held(&mvm->mutex));
+
+               /* This station is in use */
+               if (!IS_ERR(sta))
+                       continue;
+
+               /* internal stations are stored as ERR_PTR(-EINVAL) */
+               if (PTR_ERR(sta) == -EINVAL) {
+                       IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
+                               sta_id);
+                       continue;
+               }
+
+               if (!sta) {
+                       IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
+                               sta_id);
+                       continue;
+               }
+
+               WARN_ON(PTR_ERR(sta) != -EBUSY);
+               /* This station was removed and we waited until it got drained,
+                * we can now proceed and remove it.
+                */
+               ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+               if (ret) {
+                       IWL_ERR(mvm,
+                               "Couldn't remove sta %d after it was drained\n",
+                               sta_id);
+                       continue;
+               }
+               rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+               clear_bit(sta_id, mvm->sta_drained);
+       }
+
+       mutex_unlock(&mvm->mutex);
+}
+
+/*
+ * Remove a mac80211 station. If frames are still pending for it, the
+ * station is only marked as ERR_PTR(-EBUSY) and put into drain mode; the
+ * actual removal then happens in iwl_mvm_sta_drained_wk().
+ */
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+                  struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           mvmvif->ap_sta_id == mvm_sta->sta_id) {
+               /*
+                * Put a non-NULL since the fw station isn't removed.
+                * It will be removed after the MAC will be set as
+                * unassoc.
+                */
+               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+                                  ERR_PTR(-EINVAL));
+
+               /* flush its queues here since we are freeing mvm_sta */
+               ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+
+               /* if we are associated - we can't remove the AP STA now */
+               if (vif->bss_conf.assoc)
+                       return ret;
+
+               /* unassoc - go ahead - remove the AP STA now */
+               mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+       }
+
+       /*
+        * There are frames pending on the AC queues for this station.
+        * We need to wait until all the frames are drained...
+        */
+       if (atomic_read(&mvm_sta->pending_frames)) {
+               ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+                                  ERR_PTR(-EBUSY));
+       } else {
+               ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+       }
+
+       return ret;
+}
+
+/*
+ * Remove a station from the firmware by station id and clear the local
+ * fw_id_to_mac_id mapping. Returns the result of the REMOVE_STA command.
+ */
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+                     struct ieee80211_vif *vif,
+                     u8 sta_id)
+{
+       int ret;
+
+       /*
+        * Must be asserted before calling iwl_mvm_rm_sta_common(), which
+        * does rcu_dereference_protected() under this mutex. The original
+        * code called it in ret's initializer, before the assertion ran.
+        */
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+
+       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+       return ret;
+}
+
+/*
+ * Allocate a firmware station id for an internal (driver-only) station
+ * with TX queue mask @qmask, and reserve it in fw_id_to_mac_id with an
+ * error value so the id is not handed out again. On HW restart the
+ * previously allocated id is kept.
+ */
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
+                            u32 qmask)
+{
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+               if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
+                       return -ENOSPC;
+       }
+
+       sta->tfd_queue_msk = qmask;
+
+       /* put a non-NULL value so iterating over the stations won't stop */
+       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+       return 0;
+}
+
+/* Release an internal station: clear its mapping and invalidate its id */
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
+{
+       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+       memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+       sta->sta_id = IWL_MVM_STATION_COUNT;
+}
+
+/*
+ * Send the ADD_STA command for an internal station. @addr may be NULL,
+ * in which case no MAC address is written into the command.
+ */
+static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
+                                     struct iwl_mvm_int_sta *sta,
+                                     const u8 *addr,
+                                     u16 mac_id, u16 color)
+{
+       struct iwl_mvm_add_sta_cmd cmd;
+       int ret;
+       u32 status;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+       cmd.sta_id = sta->sta_id;
+       cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+                                                            color));
+
+       cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+
+       if (addr)
+               memcpy(cmd.addr, addr, ETH_ALEN);
+
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
+       if (ret)
+               return ret;
+
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+               return 0;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
+                       status);
+               break;
+       }
+       return ret;
+}
+
+/*
+ * Allocate and add the auxiliary internal station (MAC_INDEX_AUX), with
+ * no TX queues attached. Deallocates the station again on failure.
+ */
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Add the aux station, but without any queues */
+       ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+       if (ret)
+               return ret;
+
+       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+                                        MAC_INDEX_AUX, 0);
+
+       if (ret)
+               iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+       return ret;
+}
+
+/*
+ * Send the add station command for the vif's broadcast station.
+ * Assumes that the station was already allocated.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ * @bsta: the broadcast station to add.
+ *
+ * Returns 0 on success, -ENOSPC if the station was never allocated, or
+ * the error from the ADD_STA command.
+ */
+int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                          struct iwl_mvm_int_sta *bsta)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
+               return -ENOSPC;
+
+       return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+                                         mvmvif->id, mvmvif->color);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, but DO NOT remove the entry from the local data structures. */
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm,
+                             struct iwl_mvm_int_sta *bsta)
+{
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+       if (ret)
+               IWL_WARN(mvm, "Failed sending remove station\n");
+       return ret;
+}
+
+/* Allocate a new station entry for the broadcast station to the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ * @bsta: the broadcast station to add.
+ *
+ * On failure of the ADD_STA command the allocated entry is freed again. */
+int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                         struct iwl_mvm_int_sta *bsta)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       u32 qmask;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* the broadcast station uses the vif's queue mask */
+       qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+       ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+       if (ret)
+               return ret;
+
+       ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+                                        mvmvif->id, mvmvif->color);
+
+       if (ret)
+               iwl_mvm_dealloc_int_sta(mvm, bsta);
+       return ret;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
+{
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+       if (ret)
+               return ret;
+
+       iwl_mvm_dealloc_int_sta(mvm, bsta);
+       return ret;
+}
+
+/*
+ * Start (@start = true) or stop an RX BlockAck session in the firmware
+ * for @tid, with starting sequence number @ssn. Returns -ENOSPC if the
+ * firmware refused the immediate BA, -EIO on other firmware failures.
+ */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                      int tid, u16 ssn, bool start)
+{
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       struct iwl_mvm_add_sta_cmd cmd = {};
+       int ret;
+       u32 status;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+       cmd.sta_id = mvm_sta->sta_id;
+       cmd.add_modify = STA_MODE_MODIFY;
+       cmd.add_immediate_ba_tid = (u8) tid;
+       cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+       cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
+                                 STA_MODIFY_REMOVE_BA_TID;
+
+       status = ADD_STA_SUCCESS;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
+       if (ret)
+               return ret;
+
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
+                              start ? "start" : "stopp");
+               break;
+       case ADD_STA_IMMEDIATE_BA_FAILURE:
+               IWL_WARN(mvm, "RX BA Session refused by fw\n");
+               ret = -ENOSPC;
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+                       start ? "start" : "stopp", status);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Enable or disable TX aggregation for @tid on @queue: update the
+ * station's TFD queue mask and per-TID aggregation-disable mask, then
+ * push both to the firmware via ADD_STA (modify).
+ */
+static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                             int tid, u8 queue, bool start)
+{
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       struct iwl_mvm_add_sta_cmd cmd = {};
+       int ret;
+       u32 status;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (start) {
+               mvm_sta->tfd_queue_msk |= BIT(queue);
+               mvm_sta->tid_disable_agg &= ~BIT(tid);
+       } else {
+               mvm_sta->tfd_queue_msk &= ~BIT(queue);
+               mvm_sta->tid_disable_agg |= BIT(tid);
+       }
+
+       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+       cmd.sta_id = mvm_sta->sta_id;
+       cmd.add_modify = STA_MODE_MODIFY;
+       cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+       cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+       cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+
+       status = ADD_STA_SUCCESS;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
+       if (ret)
+               return ret;
+
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
+                       start ? "start" : "stopp", status);
+               break;
+       }
+
+       return ret;
+}
+
+/* map each TID to the mac80211 AC whose hw queue carries its traffic */
+static const u8 tid_to_ac[] = {
+       IEEE80211_AC_BE,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BE,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VO,
+       IEEE80211_AC_VO,
+};
+
+/*
+ * Start a TX aggregation session for @tid: pick a free aggregation TX
+ * queue and either tell mac80211 to proceed right away (the queue has no
+ * outstanding frames) or defer via IWL_EMPTYING_HW_QUEUE_ADDBA until the
+ * queue drains.
+ */
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+       struct iwl_mvm_tid_data *tid_data;
+       int txq_id;
+
+       if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+               return -EINVAL;
+
+       if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+               IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+                       mvmsta->tid_data[tid].state);
+               return -ENXIO;
+       }
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* find a free aggregation TX queue */
+       for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
+            txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+               if (mvm->queue_to_mac80211[txq_id] ==
+                   IWL_INVALID_MAC80211_QUEUE)
+                       break;
+
+       if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+               IWL_ERR(mvm, "Failed to allocate agg queue\n");
+               return -EIO;
+       }
+
+       /* the new tx queue is still connected to the same mac80211 queue */
+       mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+
+       spin_lock_bh(&mvmsta->lock);
+       tid_data = &mvmsta->tid_data[tid];
+       tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+       tid_data->txq_id = txq_id;
+       *ssn = tid_data->ssn;
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+                           mvmsta->sta_id, tid, txq_id, tid_data->ssn,
+                           tid_data->next_reclaimed);
+
+       if (tid_data->ssn == tid_data->next_reclaimed) {
+               tid_data->state = IWL_AGG_STARTING;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+       } else {
+               tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+       }
+
+       spin_unlock_bh(&mvmsta->lock);
+
+       return 0;
+}
+
+/*
+ * The aggregation session is operational: enable the aggregation queue in
+ * the transport, cap the BA reorder buffer size to the firmware's
+ * per-station limit and push the updated rate-scaling configuration.
+ */
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta, u16 tid, u8 buf_size)
+{
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+       struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+       int queue, fifo, ret;
+       u16 ssn;
+
+       buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
+
+       spin_lock_bh(&mvmsta->lock);
+       ssn = tid_data->ssn;
+       queue = tid_data->txq_id;
+       tid_data->state = IWL_AGG_ON;
+       tid_data->ssn = 0xffff;
+       spin_unlock_bh(&mvmsta->lock);
+
+       fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+
+       ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+       if (ret)
+               return -EIO;
+
+       iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid,
+                            buf_size, ssn);
+
+       /*
+        * Even though in theory the peer could have different
+        * aggregation reorder buffer sizes for different sessions,
+        * our ucode doesn't allow for that and has a global limit
+        * for each station. Therefore, use the minimum of all the
+        * aggregation sessions and our default value.
+        */
+       mvmsta->max_agg_bufsize =
+               min(mvmsta->max_agg_bufsize, buf_size);
+       mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+
+       if (mvm->cfg->ht_params->use_rts_for_aggregation) {
+               /*
+                * switch to RTS/CTS if it is the prefer protection
+                * method for HT traffic
+                */
+               mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+               /*
+                * TODO: remove the TLC_RTS flag when we tear down the last
+                * AGG session (agg_tids_count in DVM)
+                */
+       }
+
+       IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+                    sta->addr, tid);
+
+       return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
+}
+
+/*
+ * Stop a TX aggregation session. If frames for this RA/TID are still in
+ * the HW, the teardown is deferred (IWL_EMPTYING_HW_QUEUE_DELBA) until
+ * the queue empties; otherwise the queue is disabled immediately.
+ */
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta, u16 tid)
+{
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+       struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+       u16 txq_id;
+       int err;
+
+       spin_lock_bh(&mvmsta->lock);
+
+       txq_id = tid_data->txq_id;
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
+                           mvmsta->sta_id, tid, txq_id, tid_data->state);
+
+       switch (tid_data->state) {
+       case IWL_AGG_ON:
+               tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "ssn = %d, next_recl = %d\n",
+                                   tid_data->ssn, tid_data->next_reclaimed);
+
+               /* There are still packets for this RA / TID in the HW */
+               if (tid_data->ssn != tid_data->next_reclaimed) {
+                       tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
+                       err = 0;
+                       break;
+               }
+
+               tid_data->ssn = 0xffff;
+               iwl_trans_txq_disable(mvm->trans, txq_id);
+               /* fall through */
+       case IWL_AGG_STARTING:
+       case IWL_EMPTYING_HW_QUEUE_ADDBA:
+               /*
+                * The agg session has been stopped before it was set up. This
+                * can happen when the AddBA timer times out for example.
+                */
+
+               /* No barriers since we are under mutex */
+               lockdep_assert_held(&mvm->mutex);
+               /* release the aggregation queue for reuse */
+               mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
+
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               tid_data->state = IWL_AGG_OFF;
+               err = 0;
+               break;
+       default:
+               IWL_ERR(mvm,
+                       "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+                       mvmsta->sta_id, tid, tid_data->state);
+               IWL_ERR(mvm,
+                       "\ttid_data->txq_id = %d\n", tid_data->txq_id);
+               err = -EINVAL;
+       }
+
+       spin_unlock_bh(&mvmsta->lock);
+
+       return err;
+}
+
+/*
+ * Reserve and return a free index in the firmware key table, or
+ * STA_KEY_IDX_INVALID if all STA_KEY_MAX_NUM entries are in use.
+ */
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
+{
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+
+       if (i == STA_KEY_MAX_NUM)
+               return STA_KEY_IDX_INVALID;
+
+       __set_bit(i, mvm->fw_key_table);
+
+       return i;
+}
+
+/*
+ * Return the firmware station id a key for @sta should be installed on.
+ * With no station (@sta == NULL) on a station interface, GTKs are
+ * installed on the AP's station id. Returns IWL_INVALID_STATION when no
+ * station id can be derived.
+ */
+static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+
+       if (sta) {
+               struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+               return mvm_sta->sta_id;
+       }
+
+       /*
+        * The device expects GTKs for station interfaces to be
+        * installed as GTKs for the AP station. If we have no
+        * station ID, then use AP's station ID.
+        */
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
+               return mvmvif->ap_sta_id;
+
+       return IWL_INVALID_STATION;
+}
+
+/*
+ * Program a TKIP or CCMP key into the firmware for @sta_id via ADD_STA
+ * (modify). For TKIP the phase-1 key (@tkip_p1k with IV32 @tkip_iv32) is
+ * written as well. @cmd_flags selects sync vs. async submission.
+ */
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+                               struct iwl_mvm_sta *mvm_sta,
+                               struct ieee80211_key_conf *keyconf,
+                               u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
+                               u32 cmd_flags)
+{
+       __le16 key_flags;
+       struct iwl_mvm_add_sta_cmd cmd = {};
+       int ret, status;
+       u16 keyidx;
+       int i;
+
+       keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+                STA_KEY_FLG_KEYID_MSK;
+       key_flags = cpu_to_le16(keyidx);
+       key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+               cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+               for (i = 0; i < 5; i++)
+                       cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+               memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+               memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+               break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+       cmd.key.key_offset = keyconf->hw_key_idx;
+       cmd.key.key_flags = key_flags;
+       cmd.add_modify = STA_MODE_MODIFY;
+       cmd.modify_mask = STA_MODIFY_KEY;
+       cmd.sta_id = sta_id;
+
+       status = ADD_STA_SUCCESS;
+       if (cmd_flags == CMD_SYNC)
+               ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                                 &cmd, &status);
+       else
+               ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+                                          sizeof(cmd), &cmd);
+
+       /* NOTE(review): in the CMD_ASYNC path nothing updates 'status', so
+        * this switch always sees ADD_STA_SUCCESS and logs "passed" even
+        * before the firmware has answered - confirm this is intended. */
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * iwl_mvm_send_sta_igtk - install or remove the IGTK (management-frame
+ * protection key) via the MGMT_MCAST_KEY command.
+ *
+ * Only AES-CMAC group keys with key index 4 or 5 are accepted; anything
+ * else is a caller bug (WARN + -EINVAL). On install, K1/K2 are derived
+ * from the key by mac80211 and the current receive packet number is
+ * packed into a le64 with pn[5] as the least significant byte.
+ */
+static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
+                                struct ieee80211_key_conf *keyconf,
+                                u8 sta_id, bool remove_key)
+{
+       struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+
+       /* verify the key details match the required command's expectations */
+       if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
+                   (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+                   (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
+               return -EINVAL;
+
+       igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+       igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+       if (remove_key) {
+               /* removal only needs the invalid flag, no key material */
+               igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+       } else {
+               struct ieee80211_key_seq seq;
+               const u8 *pn;
+
+               memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
+               ieee80211_aes_cmac_calculate_k1_k2(keyconf,
+                                                  igtk_cmd.K1, igtk_cmd.K2);
+               ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+               pn = seq.aes_cmac.pn;
+               igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
+                                                      ((u64) pn[4] << 8) |
+                                                      ((u64) pn[3] << 16) |
+                                                      ((u64) pn[2] << 24) |
+                                                      ((u64) pn[1] << 32) |
+                                                      ((u64) pn[0] << 40));
+       }
+
+       IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+                      remove_key ? "removing" : "installing",
+                      igtk_cmd.sta_id);
+
+       return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC,
+                                   sizeof(igtk_cmd), &igtk_cmd);
+}
+
+
+/*
+ * iwl_mvm_get_mac_addr - MAC address of the key's peer, needed for the
+ * TKIP phase-1 key derivation.
+ *
+ * For a pairwise key @sta is given and its address is returned. For a
+ * group key on a station interface, the AP station is resolved from
+ * fw_id_to_mac_id instead; this requires mvm->mutex to be held (enforced
+ * by the rcu_dereference_protected lockdep condition).
+ * Returns NULL when no peer can be determined.
+ */
+static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+
+       if (sta)
+               return sta->addr;
+
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+               u8 sta_id = mvmvif->ap_sta_id;
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                               lockdep_is_held(&mvm->mutex));
+               return sta->addr;
+       }
+
+
+       return NULL;
+}
+
+/*
+ * iwl_mvm_set_sta_key - install a key (PTK, GTK or IGTK) into the fw.
+ *
+ * @sta: may be NULL for group keys; the station is then resolved from
+ *	the vif's AP station id / fw_id_to_mac_id.
+ * @have_key_offset: when false, a free slot in mvm->fw_key_table is
+ *	allocated and stored in keyconf->hw_key_idx. The D3 firmware
+ *	hardcodes the PTK offset to 0, so d3.c sets the offset itself
+ *	and passes true.
+ *
+ * Must be called with mvm->mutex held. Returns 0 on success or a
+ * negative errno.
+ */
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+                       struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta,
+                       struct ieee80211_key_conf *keyconf,
+                       bool have_key_offset)
+{
+       struct iwl_mvm_sta *mvm_sta;
+       int ret;
+       u8 *addr, sta_id;
+       struct ieee80211_key_seq seq;
+       u16 p1k[5];
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Get the station id from the mvm local station table */
+       sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(mvm, "Failed to find station id\n");
+               return -EINVAL;
+       }
+
+       /* IGTKs go through their own command, not the station key path */
+       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+               ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+               goto end;
+       }
+
+       /*
+        * It is possible that the 'sta' parameter is NULL, and thus
+        * there is a need to retrieve the sta from the local station table.
+        */
+       if (!sta) {
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                               lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta)) {
+                       IWL_ERR(mvm, "Invalid station id\n");
+                       return -EINVAL;
+               }
+       }
+
+       mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+       if (WARN_ON_ONCE(mvm_sta->vif != vif))
+               return -EINVAL;
+
+       if (!have_key_offset) {
+               /*
+                * The D3 firmware hardcodes the PTK offset to 0, so we have to
+                * configure it there. As a result, this workaround exists to
+                * let the caller set the key offset (hw_key_idx), see d3.c.
+                */
+               keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
+               if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+                       return -ENOSPC;
+       }
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+               /* get phase 1 key from mac80211 */
+               ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+               ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+               ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+                                          seq.tkip.iv32, p1k, CMD_SYNC);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+                                          0, NULL, CMD_SYNC);
+               break;
+       default:
+               IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher);
+               ret = -EINVAL;
+       }
+
+       /* release the slot we may have allocated above */
+       if (ret)
+               __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+
+end:
+       /*
+        * 'sta' can still be NULL here - the IGTK branch jumps to 'end'
+        * before the station pointer is resolved (group keys are set with
+        * no station from mac80211), so don't dereference it blindly.
+        */
+       IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+                     sta ? sta->addr : NULL, ret);
+       return ret;
+}
+
+/*
+ * iwl_mvm_remove_sta_key - remove a key from the fw station table.
+ *
+ * IGTKs are removed with their own command before any fw_key_table
+ * bookkeeping. For other ciphers the slot in mvm->fw_key_table is
+ * released first - deliberately even if the station is already gone -
+ * so the offset can be reused.
+ * @sta may be NULL for group keys; the station is then resolved from
+ * fw_id_to_mac_id (e.g. the AP station for a GTK).
+ *
+ * Must be called with mvm->mutex held. Returns 0 on success or a
+ * negative errno.
+ */
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+                          struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *keyconf)
+{
+       struct iwl_mvm_sta *mvm_sta;
+       struct iwl_mvm_add_sta_cmd cmd = {};
+       __le16 key_flags;
+       int ret, status;
+       u8 sta_id;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Get the station id from the mvm local station table */
+       sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+
+       IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
+                     keyconf->keyidx, sta_id);
+
+       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+
+       ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+       if (!ret) {
+               IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+                       keyconf->hw_key_idx);
+               return -ENOENT;
+       }
+
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+               return 0;
+       }
+
+       /*
+        * It is possible that the 'sta' parameter is NULL, and thus
+        * there is a need to retrieve the sta from the local station table,
+        * for example when a GTK is removed (where the sta_id will then be
+        * the AP ID, and no station was passed by mac80211.)
+        *
+        * Note that fw_id_to_mac_id may also hold ERR_PTR entries for
+        * internal stations, so check with IS_ERR_OR_NULL (as the set-key
+        * path does), not just for NULL.
+        */
+       if (!sta) {
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                               lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta)) {
+                       IWL_ERR(mvm, "Invalid station id\n");
+                       return -EINVAL;
+               }
+       }
+
+       mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+       if (WARN_ON_ONCE(mvm_sta->vif != vif))
+               return -EINVAL;
+
+       /* invalidate the key: keep the index/multicast flags, no key data */
+       key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+                                STA_KEY_FLG_KEYID_MSK);
+       key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+       key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+       if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+       cmd.key.key_flags = key_flags;
+       cmd.key.key_offset = keyconf->hw_key_idx;
+       cmd.sta_id = sta_id;
+
+       cmd.modify_mask = STA_MODIFY_KEY;
+       cmd.add_modify = STA_MODE_MODIFY;
+
+       status = ADD_STA_SUCCESS;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
+
+       switch (status) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * iwl_mvm_update_tkip_key - push an updated TKIP phase-1 key to the fw.
+ *
+ * Unlike set/remove above, this runs under RCU (not mvm->mutex) and
+ * sends the key CMD_ASYNC, so it never sleeps - presumably because it
+ * is invoked from the rx/TKIP rekeying path (TODO confirm callers).
+ */
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_key_conf *keyconf,
+                            struct ieee80211_sta *sta, u32 iv32,
+                            u16 *phase1key)
+{
+       struct iwl_mvm_sta *mvm_sta;
+       u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+
+       if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+               return;
+
+       rcu_read_lock();
+
+       /* group key update: resolve the station (usually the AP) by id */
+       if (!sta) {
+               sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+               if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+                       rcu_read_unlock();
+                       return;
+               }
+       }
+
+       mvm_sta = (void *)sta->drv_priv;
+       iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+                            iv32, phase1key, CMD_ASYNC);
+       rcu_read_unlock();
+}
+
+/*
+ * iwl_mvm_sta_modify_ps_wake - mark a sleeping station as awake in the
+ * fw so it resumes transmitting to it. Sent CMD_ASYNC, so this never
+ * sleeps and any send failure is only logged.
+ */
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, int sta_id)
+{
+       struct iwl_mvm_add_sta_cmd cmd = {
+               .add_modify = STA_MODE_MODIFY,
+               .sta_id = sta_id,
+               .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+               .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
+       };
+       int ret;
+
+       /*
+        * Same modify mask for sleep_tx_count and sleep_state_flags but this
+        * should be fine since if we set the STA as "awake", then
+        * sleep_tx_count is not relevant.
+        */
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+/*
+ * iwl_mvm_sta_modify_sleep_tx_count - allow @cnt frames out to a
+ * sleeping station in response to a PS-Poll or U-APSD trigger frame.
+ *
+ * @reason selects the matching sleep-state flag (UAPSD vs PS-Poll);
+ * both fields share one modify mask, so they are set together.
+ * Sent CMD_ASYNC; failures are only logged.
+ */
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, int sta_id,
+                                      enum ieee80211_frame_release_type reason,
+                                      u16 cnt)
+{
+       u16 sleep_state_flags =
+               (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
+                       STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
+       struct iwl_mvm_add_sta_cmd cmd = {
+               .add_modify = STA_MODE_MODIFY,
+               .sta_id = sta_id,
+               .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+               .sleep_tx_count = cpu_to_le16(cnt),
+               /*
+                * Same modify mask for sleep_tx_count and sleep_state_flags so
+                * we must set the sleep_state_flags too.
+                */
+               .sleep_state_flags = cpu_to_le16(sleep_state_flags),
+       };
+       int ret;
+
+       /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
new file mode 100644 (file)
index 0000000..bdd7c5e
--- /dev/null
@@ -0,0 +1,372 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __sta_h__
+#define __sta_h__
+
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+#include <linux/wait.h>
+
+#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "rs.h"
+
+struct iwl_mvm;
+
+/**
+ * DOC: station table - introduction
+ *
+ * The station table is a list of data structures that represent the stations.
+ * In STA/P2P client mode, the driver will hold one station for the AP/ GO.
+ * In GO/AP mode, the driver will have as many stations as associated clients.
+ * All these stations are reflected in the fw's station table. The driver
+ * keeps the fw's station table up to date with the ADD_STA command. Stations
+ * can be removed by the REMOVE_STA command.
+ *
+ * All the data related to a station is held in the structure %iwl_mvm_sta
+ * which is embedded in mac80211's %ieee80211_sta (in the drv_priv) area.
+ * This data includes the index of the station in the fw, per tid information
+ * (sequence numbers, Block-ack state machine, etc...). The stations are
+ * created and deleted by the %sta_state callback from %ieee80211_ops.
+ *
+ * The driver holds a map: %fw_id_to_mac_id that allows to fetch a
+ * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
+ * station index. That way, the driver is able to get the tid related data in
+ * O(1) in time sensitive paths (Tx / Tx response / BA notification). These
+ * paths are triggered by the fw, and the driver needs to get a pointer to the
+ * %ieee80211 structure. This map helps to get that pointer quickly.
+ */
+
+/**
+ * DOC: station table - locking
+ *
+ * As stated before, the station is created / deleted by mac80211's %sta_state
+ * callback from %ieee80211_ops which can sleep. The next paragraph explains
+ * the locking of a single station, the next ones relate to the station
+ * table.
+ *
+ * The station holds the sequence number per tid. So this data needs to be
+ * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
+ * information (the state machine / and the logic that checks if the queues
+ * were drained), so it also needs to be accessible from the Tx response flow.
+ * In short, the station needs to be accessed from sleepable context as well as
+ * from tasklets, so the station itself needs a spinlock.
+ *
+ * The writers of %fw_id_to_mac_id map are serialized by the global mutex of
+ * the mvm op_mode. This is possible since %sta_state can sleep.
+ * The pointers in this map are RCU protected, hence we won't replace the
+ * station while we have Tx / Tx response / BA notification running.
+ *
+ * If a station is deleted while it still has packets in its A-MPDU queues,
+ * then the reclaim flow will notice that there is no station in the map for
+ * sta_id and it will dump the responses.
+ */
+
+/**
+ * DOC: station table - internal stations
+ *
+ * The FW needs a few internal stations that are not reflected in
+ * mac80211, such as broadcast station in AP / GO mode, or AUX sta for
+ * scanning and P2P device (during the GO negotiation).
+ * For these kind of stations we have %iwl_mvm_int_sta struct which holds the
+ * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
+ * Usually the data for these stations is static, so no locking is required,
+ * and no TID data as this is also not needed.
+ * One thing to note, is that these stations have an ID in the fw, but not
+ * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
+ * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of
+ * pointers from this mapping need to check that the value is not error
+ * or NULL.
+ *
+ * Currently there is only one auxiliary station for scanning, initialized
+ * on init.
+ */
+
+/**
+ * DOC: station table - AP Station in STA mode
+ *
+ * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
+ * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
+ * the AP station from the fw before setting the MAC context as unassociated.
+ * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
+ * removed by mac80211, but the station won't be removed in the fw until the
+ * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
+ */
+
+/**
+ * DOC: station table - Drain vs. Flush
+ *
+ * Flush means that all the frames in the SCD queue are dumped regardless the
+ * station to which they were sent. We do that when we disassociate and before
+ * we remove the STA of the AP. The flush can be done synchronously against the
+ * fw.
+ * Drain means that the fw will drop all the frames sent to a specific station.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates. In
+ * that case, we need to drain all the frames for that client from the AC queues
+ * that are shared with the other clients. Only then, we can remove the STA in
+ * the fw. In order to do so, we track the non-AMPDU packets for each station.
+ * If mac80211 removes a STA and if it still has non-AMPDU packets pending in
+ * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
+ * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
+ * (we know about it with its Tx response), we remove the station in fw and set
+ * it as %NULL in %fw_id_to_mac_id: this is the purpose of
+ * %iwl_mvm_sta_drained_wk.
+ */
+
+/**
+ * DOC: station table - fw restart
+ *
+ * When the fw asserts, or we have any other issue that requires to reset the
+ * driver, we require mac80211 to reconfigure the driver. Since the private
+ * data of the stations is embed in mac80211's %ieee80211_sta, that data will
+ * not be zeroed and needs to be reinitialized manually.
+ * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us
+ * that we must not allocate a new sta_id but reuse the previous one. This
+ * means that the stations being re-added after the reset will have the same
+ * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
+ * map, since the stations aren't in the fw any more. Internal stations that
+ * are not added by mac80211 will be re-added in the init flow that is called
+ * after the restart: mac80211 calls %iwl_mvm_mac_start which calls
+ * %iwl_mvm_up.
+ */
+
+/**
+ * DOC: AP mode - PS
+ *
+ * When a station is asleep, the fw will set it as "asleep". All the
+ * non-aggregation frames to that station will be dropped by the fw
+ * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * AMPDUs are in a separate queue that is stopped by the fw. We just need to
+ * let mac80211 know how many frames we have in these queues so that it can
+ * properly handle trigger frames.
+ * When a trigger frame is received, mac80211 tells the driver to send
+ * frames from the AMPDU queues or AC queue depending on which queue are
+ * delivery-enabled and what TID has frames to transmit (Note that mac80211 has
+ * all the knowledge since all the non-agg frames are buffered / filtered, and
+ * the driver tells mac80211 about agg frames). The driver needs to tell the fw
+ * to let frames out even if the station is asleep. This is done by
+ * %iwl_mvm_sta_modify_sleep_tx_count.
+ * When we receive a frame from that station with PM bit unset, the
+ * driver needs to let the fw know that this station isn't asleep any more.
+ * This is done by %iwl_mvm_sta_modify_ps_wake.
+ *
+ * TODO - EOSP handling
+ */
+
+/**
+ * enum iwl_mvm_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *     HW queue to be empty from packets for this RA /TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *     HW queue to be empty from packets for this RA /TID.
+ */
+enum iwl_mvm_agg_state {
+       IWL_AGG_OFF = 0,
+       IWL_AGG_STARTING,
+       IWL_AGG_ON,
+       IWL_EMPTYING_HW_QUEUE_ADDBA,    /* draining before the session starts */
+       IWL_EMPTYING_HW_QUEUE_DELBA,    /* draining before tear down completes */
+};
+
+/**
+ * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ *     This is basically (last acked packet++).
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ *     the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ *     Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ *     we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_mvm_tid_data {
+       /*
+        * NOTE(review): seq_number is presumably stored in the IEEE80211
+        * sequence-control wire format (seq << 4), as in other iwlwifi
+        * drivers - confirm against the Tx path before relying on it.
+        */
+       u16 seq_number;
+       u16 next_reclaimed;
+       /* The rest is Tx AGG related */
+       u32 rate_n_flags;
+       enum iwl_mvm_agg_state state;
+       u16 txq_id;
+       u16 ssn;
+       bool wait_for_ba;
+};
+
+/**
+ * struct iwl_mvm_sta - representation of a station in the driver
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ * @mac_id_n_color: the MAC context this station is linked to
+ * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
+ *     tid.
+ * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
+ * and from Tx response flow, it needs a spinlock.
+ * @pending_frames: number of frames for this STA on the shared Tx queues.
+ * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @lq_sta: rate-scaling state, presumably owned by the rs algorithm
+ *     (see rs.h) - confirm.
+ * @vif: the interface this station belongs to
+ * @last_seq_ctl: sequence control of the last frame, kept only when
+ *     CONFIG_PM_SLEEP is set - presumably for the D3/WoWLAN resume
+ *     flow, confirm against d3.c.
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by driver. This structure is placed in that
+ * space.
+ *
+ */
+struct iwl_mvm_sta {
+       u32 sta_id;
+       u32 tfd_queue_msk;
+       u32 mac_id_n_color;
+       u16 tid_disable_agg;
+       u8 max_agg_bufsize;
+       spinlock_t lock;
+       atomic_t pending_frames;
+       struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+       struct iwl_lq_sta lq_sta;
+       struct ieee80211_vif *vif;
+
+#ifdef CONFIG_PM_SLEEP
+       u16 last_seq_ctl;
+#endif
+};
+
+/**
+ * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
+ * broadcast)
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ *
+ * Internal stations exist in the fw but not in mac80211; see the
+ * "station table - internal stations" DOC section above.
+ */
+struct iwl_mvm_int_sta {
+       u32 sta_id;
+       u32 tfd_queue_msk;
+};
+
+/* Station lifetime (stations known to mac80211) */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                          bool update);
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+                   struct ieee80211_vif *vif,
+                   struct ieee80211_sta *sta);
+int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+                  struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+                     struct ieee80211_vif *vif,
+                     u8 sta_id);
+
+/* Security keys */
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+                       struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta,
+                       struct ieee80211_key_conf *key,
+                       bool have_key_offset);
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+                          struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *keyconf);
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_key_conf *keyconf,
+                            struct ieee80211_sta *sta, u32 iv32,
+                            u16 *phase1key);
+
+/* AMPDU */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                      int tid, u16 ssn, bool start);
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u8 buf_size);
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta, u16 tid);
+
+/* Internal (fw-only) stations: aux and broadcast */
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
+                            u32 qmask);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
+                            struct iwl_mvm_int_sta *sta);
+int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                          struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm,
+                             struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                         struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta);
+
+/* Power save / drain */
+void iwl_mvm_sta_drained_wk(struct work_struct *wk);
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, int sta_id);
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, int sta_id,
+                                      enum ieee80211_frame_release_type reason,
+                                      u16 cnt);
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                     bool drain);
+
+#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
new file mode 100644 (file)
index 0000000..c09b71f
--- /dev/null
@@ -0,0 +1,511 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/jiffies.h>
+#include <net/mac80211.h>
+
+#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+#include "fw-api.h"
+#include "time-event.h"
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/* A TimeUnit is 1024 microsecond */
+#define TU_TO_JIFFIES(_tu)     (usecs_to_jiffies((_tu) * 1024))
+#define MSEC_TO_TU(_msec)      (_msec*1000/1024)
+
+/* For ROC use a TE type which has priority high enough to be scheduled when
+ * there is a concurrent BSS or GO/AP. Currently, use a TE type that has
+ * priority similar to the TE priority used for action scans by the FW.
+ * TODO: This needs to be changed, based on the reason for the ROC, i.e., use
+ * TE_P2P_DEVICE_DISCOVERABLE for remain on channel without mgmt skb, and use
+ * TE_P2P_DEVICE_ACTION_SCAN
+ */
+#define IWL_MVM_ROC_TE_TYPE TE_P2P_DEVICE_ACTION_SCAN
+
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+                          struct iwl_mvm_time_event_data *te_data)
+{
+       lockdep_assert_held(&mvm->time_event_lock);
+
+       if (te_data->id == TE_MAX)
+               return;
+
+       list_del(&te_data->list);
+       te_data->running = false;
+       te_data->uid = 0;
+       te_data->id = TE_MAX;
+       te_data->vif = NULL;
+}
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+       synchronize_net();
+
+       /*
+        * Flush the offchannel queue -- this is called when the time
+        * event finishes or is cancelled, so that frames queued for it
+        * won't get stuck on the queue and be transmitted in the next
+        * time event.
+        * We have to send the command asynchronously since this cannot
+        * be under the mutex for locking reasons, but that's not an
+        * issue as it will have to complete before the next command is
+        * executed, and a new time event means a new command.
+        */
+       iwl_mvm_flush_tx_path(mvm, BIT(IWL_OFFCHANNEL_QUEUE), false);
+}
+
+static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
+{
+       /*
+        * First, clear the ROC_RUNNING status bit. This will cause the TX
+        * path to drop offchannel transmissions. That would also be done
+        * by mac80211, but it is racy, in particular in the case that the
+        * time event actually completed in the firmware (which is handled
+        * in iwl_mvm_te_handle_notif).
+        */
+       clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+
+       /*
+        * Of course, our status bit is just as racy as mac80211, so in
+        * addition, fire off the work struct which will drop all frames
+        * from the hardware queues that made it through the race. First
+        * it will of course synchronize the TX path to make sure that
+        * any *new* TX will be rejected.
+        */
+       schedule_work(&mvm->roc_done_wk);
+}
+
+/*
+ * Handles a FW notification for an event that is known to the driver.
+ *
+ * @mvm: the mvm component
+ * @te_data: the time event data
+ * @notif: the notification data corresponding the time event data.
+ */
+static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+                                   struct iwl_mvm_time_event_data *te_data,
+                                   struct iwl_time_event_notif *notif)
+{
+       lockdep_assert_held(&mvm->time_event_lock);
+
+       IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+                    le32_to_cpu(notif->unique_id),
+                    le32_to_cpu(notif->action));
+
+       /*
+        * The FW sends the start/end time event notifications even for events
+        * that it fails to schedule. This is indicated in the status field of
+        * the notification. This happens in cases that the scheduler cannot
+        * find a schedule that can handle the event (for example requesting a
+        * P2P Device discoverability, while there are other higher priority
+        * events in the system).
+        */
+       WARN_ONCE(!le32_to_cpu(notif->status),
+                 "Failed to schedule time event\n");
+
+       if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_END) {
+               IWL_DEBUG_TE(mvm,
+                            "TE ended - current time %lu, estimated end %lu\n",
+                            jiffies, te_data->end_jiffies);
+
+               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+                       ieee80211_remain_on_channel_expired(mvm->hw);
+                       iwl_mvm_roc_finished(mvm);
+               }
+
+               /*
+                * By now, we should have finished association
+                * and know the dtim period.
+                */
+               if (te_data->vif->type == NL80211_IFTYPE_STATION &&
+                   (!te_data->vif->bss_conf.assoc ||
+                    !te_data->vif->bss_conf.dtim_period)) {
+                       IWL_ERR(mvm,
+                               "No assocation and the time event is over already...\n");
+                       ieee80211_connection_loss(te_data->vif);
+               }
+
+               iwl_mvm_te_clear_data(mvm, te_data);
+       } else if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_START) {
+               te_data->running = true;
+               te_data->end_jiffies = jiffies +
+                       TU_TO_JIFFIES(te_data->duration);
+
+               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+                       set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+                       ieee80211_ready_on_channel(mvm->hw);
+               }
+       } else {
+               IWL_WARN(mvm, "Got TE with unknown action\n");
+       }
+}
+
+/*
+ * The Rx handler for time event notifications
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_time_event_notif *notif = (void *)pkt->data;
+       struct iwl_mvm_time_event_data *te_data, *tmp;
+
+       IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+                    le32_to_cpu(notif->unique_id),
+                    le32_to_cpu(notif->action));
+
+       spin_lock_bh(&mvm->time_event_lock);
+       list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+               if (le32_to_cpu(notif->unique_id) == te_data->uid)
+                       iwl_mvm_te_handle_notif(mvm, te_data, notif);
+       }
+       spin_unlock_bh(&mvm->time_event_lock);
+
+       return 0;
+}
+
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+                                       struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_mvm *mvm =
+               container_of(notif_wait, struct iwl_mvm, notif_wait);
+       struct iwl_mvm_time_event_data *te_data = data;
+       struct iwl_time_event_resp *resp;
+       int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+       if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+               return true;
+
+       if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+               IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+               return true;
+       }
+
+       resp = (void *)pkt->data;
+       te_data->uid = le32_to_cpu(resp->unique_id);
+       IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+                    te_data->uid);
+       return true;
+}
+
+static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      struct iwl_mvm_time_event_data *te_data,
+                                      struct iwl_time_event_cmd *te_cmd)
+{
+       static const u8 time_event_response[] = { TIME_EVENT_CMD };
+       struct iwl_notification_wait wait_time_event;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->time_event_lock);
+       if (WARN_ON(te_data->id != TE_MAX)) {
+               spin_unlock_bh(&mvm->time_event_lock);
+               return -EIO;
+       }
+       te_data->vif = vif;
+       te_data->duration = le32_to_cpu(te_cmd->duration);
+       te_data->id = le32_to_cpu(te_cmd->id);
+       list_add_tail(&te_data->list, &mvm->time_event_list);
+       spin_unlock_bh(&mvm->time_event_lock);
+
+       /*
+        * Use a notification wait, which really just processes the
+        * command response and doesn't wait for anything, in order
+        * to be able to process the response and get the UID inside
+        * the RX path. Using CMD_WANT_SKB doesn't work because it
+        * stores the buffer and then wakes up this thread, by which
+        * time another notification (that the time event started)
+        * might already be processed unsuccessfully.
+        */
+       iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+                                  time_event_response,
+                                  ARRAY_SIZE(time_event_response),
+                                  iwl_mvm_time_event_response, te_data);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+                                  sizeof(*te_cmd), te_cmd);
+       if (ret) {
+               IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+               iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+               goto out_clear_te;
+       }
+
+       /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+       ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+       /* should never fail */
+       WARN_ON_ONCE(ret);
+
+       if (ret) {
+ out_clear_te:
+               spin_lock_bh(&mvm->time_event_lock);
+               iwl_mvm_te_clear_data(mvm, te_data);
+               spin_unlock_bh(&mvm->time_event_lock);
+       }
+       return ret;
+}
+
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            u32 duration, u32 min_duration)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+       struct iwl_time_event_cmd time_cmd = {};
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (te_data->running &&
+           time_after(te_data->end_jiffies,
+                      jiffies + TU_TO_JIFFIES(min_duration))) {
+               IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+                            jiffies_to_msecs(te_data->end_jiffies - jiffies));
+               return;
+       }
+
+       if (te_data->running) {
+               IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
+                            te_data->uid,
+                            jiffies_to_msecs(te_data->end_jiffies - jiffies));
+               /*
+                * we don't have enough time
+                * cancel the current TE and issue a new one
+                * Of course it would be better to remove the old one only
+                * when the new one is added, but we don't care if we are off
+                * channel for a bit. All we need to do, is not to return
+                * before we actually begin to be on the channel.
+                */
+               iwl_mvm_stop_session_protection(mvm, vif);
+       }
+
+       time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+       time_cmd.id_and_color =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+       time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+       time_cmd.apply_time =
+               cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+
+       time_cmd.dep_policy = TE_INDEPENDENT;
+       time_cmd.is_present = cpu_to_le32(1);
+       time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+       time_cmd.max_delay = cpu_to_le32(500);
+       /* TODO: why do we need to interval = bi if it is not periodic? */
+       time_cmd.interval = cpu_to_le32(1);
+       time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
+       time_cmd.duration = cpu_to_le32(duration);
+       time_cmd.repeat = cpu_to_le32(1);
+       time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+
+       iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+                              struct iwl_mvm_vif *mvmvif,
+                              struct iwl_mvm_time_event_data *te_data)
+{
+       struct iwl_time_event_cmd time_cmd = {};
+       u32 id, uid;
+       int ret;
+
+       /*
+        * It is possible that by the time we got to this point the time
+        * event was already removed.
+        */
+       spin_lock_bh(&mvm->time_event_lock);
+
+       /* Save time event uid before clearing its data */
+       uid = te_data->uid;
+       id = te_data->id;
+
+       /*
+        * The clear_data function handles time events that were already removed
+        */
+       iwl_mvm_te_clear_data(mvm, te_data);
+       spin_unlock_bh(&mvm->time_event_lock);
+
+       /*
+        * It is possible that by the time we try to remove it, the time event
+        * has already ended and removed. In such a case there is no need to
+        * send a removal command.
+        */
+       if (id == TE_MAX) {
+               IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
+               return;
+       }
+
+       /* When we remove a TE, the UID is to be set in the id field */
+       time_cmd.id = cpu_to_le32(uid);
+       time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+       time_cmd.id_and_color =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+       IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_ASYNC,
+                                  sizeof(time_cmd), &time_cmd);
+       if (WARN_ON(ret))
+               return;
+}
+
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+                                    struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+
+       lockdep_assert_held(&mvm->mutex);
+       iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                         int duration)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+       struct iwl_time_event_cmd time_cmd = {};
+
+       lockdep_assert_held(&mvm->mutex);
+       if (te_data->running) {
+               IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+               return -EBUSY;
+       }
+
+       /*
+        * Flush the done work, just in case it's still pending, so that
+        * the work it does can complete and we can accept new frames.
+        */
+       flush_work(&mvm->roc_done_wk);
+
+       time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+       time_cmd.id_and_color =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+       time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE);
+
+       time_cmd.apply_time = cpu_to_le32(0);
+       time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
+       time_cmd.is_present = cpu_to_le32(1);
+
+       time_cmd.interval = cpu_to_le32(1);
+
+       /*
+        * IWL_MVM_ROC_TE_TYPE can have lower priority than other events
+        * that are being scheduled by the driver/fw, and thus it might not be
+        * scheduled. To improve the chances of it being scheduled, allow it to
+        * be fragmented.
+        * In addition, for the same reasons, allow to delay the scheduling of
+        * the time event.
+        */
+       time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+       time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
+       time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+       time_cmd.repeat = cpu_to_le32(1);
+       time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+
+       return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_vif *mvmvif;
+       struct iwl_mvm_time_event_data *te_data;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /*
+        * Iterate over the list of time events and find the time event that is
+        * associated with a P2P_DEVICE interface.
+        * This assumes that a P2P_DEVICE interface can have only a single time
+        * event at any given time and this time event corresponds to a ROC
+        * request
+        */
+       mvmvif = NULL;
+       spin_lock_bh(&mvm->time_event_lock);
+       list_for_each_entry(te_data, &mvm->time_event_list, list) {
+               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+                       mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+                       break;
+               }
+       }
+       spin_unlock_bh(&mvm->time_event_lock);
+
+       if (!mvmvif) {
+               IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
+               return;
+       }
+
+       iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+
+       iwl_mvm_roc_finished(mvm);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
new file mode 100644 (file)
index 0000000..64fb57a
--- /dev/null
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __time_event_h__
+#define __time_event_h__
+
+#include "fw-api.h"
+
+#include "mvm.h"
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by channel switch decided within the fw.
+ */
+
+ /**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ *     1) Driver sends a TIME_EVENT_CMD to the fw
+ *     2) Driver gets the response for that command. This response contains the
+ *        Unique ID (UID) of the event.
+ *     3) The fw sends notification when the event starts.
+ *
+ * Of course the API provides various options that allow to cover parameters
+ * of the flow.
+ *     What is the duration of the event?
+ *     What is the start time of the event?
+ *     Is there an end-time for the event?
+ *     How much can the event be delayed?
+ *     Can the event be split?
+ *     If yes what is the maximal number of chunks?
+ *     etc...
+ */
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events to the rest of the driver,
+ * we abstract the use of time events. This component provides the functions
+ * needed by the driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: will start a new session if the current session will end
+ *     in less than min_duration.
+ *
+ * This function can be used to start a session protection which means that the
+ * fw will stay on the channel for %duration_ms milliseconds. This function
+ * will block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            u32 duration, u32 min_duration);
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If it is not needed any more it should be cancelled because
+ * the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif);
+
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in millisecond for the fw to be on the
+ * channel that is bound to the vif.
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay in the channel for the request %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                         int duration);
+
+/**
+ * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async, it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm);
+
+/**
+ * iwl_mvm_remove_time_event - general function to clean up of time event
+ * @mvm: the mvm component
+ * @vif: the vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless its type.
+ * It is useful for cleaning up time events running before removing an
+ * interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+                              struct iwl_mvm_vif *mvmvif,
+                              struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal, it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+                          struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
new file mode 100644 (file)
index 0000000..6b67ce3
--- /dev/null
@@ -0,0 +1,916 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+
+#include "iwl-trans.h"
+#include "iwl-eeprom-parse.h"
+#include "mvm.h"
+#include "sta.h"
+
+/*
+ * Sets most of the Tx cmd's fields
+ *
+ * Fills the ACK/BAR/TSF flags, BT-coex priority, QoS TID, sequence-control
+ * handling, PM frame timeout, protection flags, payload length and the
+ * station id, based on the skb contents and the mac80211 tx info.
+ */
+static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+                              struct iwl_tx_cmd *tx_cmd,
+                              struct ieee80211_tx_info *info, u8 sta_id)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       __le16 fc = hdr->frame_control;
+       u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
+       /* on-air length for the RTS threshold check below includes the FCS */
+       u32 len = skb->len + FCS_LEN;
+
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+               tx_flags |= TX_CMD_FLG_ACK;
+       else
+               tx_flags &= ~TX_CMD_FLG_ACK;
+
+       if (ieee80211_is_probe_resp(fc))
+               tx_flags |= TX_CMD_FLG_TSF;
+       else if (ieee80211_is_back_req(fc))
+               tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+
+       /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
+       if (info->band == IEEE80211_BAND_2GHZ        &&
+           (skb->protocol == cpu_to_be16(ETH_P_PAE)  ||
+            is_multicast_ether_addr(hdr->addr1)      ||
+            ieee80211_is_back_req(fc)                ||
+            ieee80211_is_mgmt(fc)))
+               tx_flags |= TX_CMD_FLG_BT_DIS;
+
+       if (ieee80211_has_morefrags(fc))
+               tx_flags |= TX_CMD_FLG_MORE_FRAG;
+
+       if (ieee80211_is_data_qos(fc)) {
+               /* QoS frames carry their own TID; FW must not touch seq_ctl */
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tx_cmd->tid_tspec = qc[0] & 0xf;
+               tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+       } else {
+               tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+               if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL;
+               else
+                       tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+       }
+
+       if (ieee80211_is_mgmt(fc)) {
+               /* NOTE(review): timeout units (3 vs 2) not visible here —
+                * presumably PS/beacon-interval based; confirm vs FW API docs.
+                */
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+               else
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+
+               /* The spec allows Action frames in A-MPDU, we don't support
+                * it
+                */
+               WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+       } else {
+               tx_cmd->pm_frame_timeout = 0;
+       }
+
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+       /* request protection for long unicast data frames */
+       if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
+           !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+               tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+       tx_cmd->driver_txop = 0;
+       tx_cmd->tx_flags = cpu_to_le32(tx_flags);
+       /* Total # bytes to be transmitted */
+       tx_cmd->len = cpu_to_le16((u16)skb->len);
+       tx_cmd->next_frame_len = 0;
+       tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+       tx_cmd->sta_id = sta_id;
+}
+
+/*
+ * Sets the fields in the Tx cmd that are rate related
+ *
+ * Retry limits, and either "use the station rate table" (data/BAR frames)
+ * or an explicit legacy rate with antenna toggling (mgmt and other frames).
+ */
+static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+                                   struct iwl_tx_cmd *tx_cmd,
+                                   struct ieee80211_tx_info *info,
+                                   struct ieee80211_sta *sta,
+                                   __le16 fc)
+{
+       u32 rate_flags;
+       int rate_idx;
+       u8 rate_plcp;
+
+       /* Set retry limit on RTS packets */
+       tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+
+       /* Set retry limit on DATA packets and Probe Responses*/
+       if (ieee80211_is_probe_resp(fc)) {
+               tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+               /* RTS retries must not exceed the data retries */
+               tx_cmd->rts_retry_limit =
+                       min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
+       } else if (ieee80211_is_back_req(fc)) {
+               tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+       } else {
+               tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+       }
+
+       /*
+        * for data packets, rate info comes from the table inside the fw. This
+        * table is controlled by LINK_QUALITY commands
+        */
+
+       if (ieee80211_is_data(fc)) {
+               tx_cmd->initial_rate_index = 0;
+               tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+               return;
+       } else if (ieee80211_is_back_req(fc)) {
+               tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+       }
+
+       /* HT rate doesn't make sense for a non data frame */
+       WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+                 "Got an HT rate for a non data frame 0x%x\n",
+                 info->control.rates[0].flags);
+
+       rate_idx = info->control.rates[0].idx;
+       /* if the rate isn't a well known legacy rate, take the lowest one */
+       /* NOTE(review): '>' lets rate_idx == IWL_RATE_COUNT_LEGACY through;
+        * confirm whether '>=' is the intended bound here.
+        */
+       if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
+               rate_idx = rate_lowest_index(
+                               &mvm->nvm_data->bands[info->band], sta);
+
+       /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rate_idx += IWL_FIRST_OFDM_RATE;
+
+       /* For 2.4 GHZ band, check that there is no need to remap */
+       BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+       /* Get PLCP rate for tx_cmd->rate_n_flags */
+       rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+       /* toggle the tx antenna used for mgmt frames */
+       mvm->mgmt_last_antenna_idx =
+               iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+                                    mvm->mgmt_last_antenna_idx);
+       rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+       /* Set CCK flag as needed */
+       if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       /* Set the rate in the TX cmd */
+       tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
+}
+
+/*
+ * Sets the fields in the Tx cmd that are crypto related
+ *
+ * Copies the cipher-specific security control and key material into the
+ * Tx cmd (CCMP, TKIP phase-2 key, or WEP40/104).
+ */
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+               if (info->flags & IEEE80211_TX_CTL_AMPDU)
+                       tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+               break;
+
+       case WLAN_CIPHER_SUITE_TKIP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+               /* TKIP needs the per-packet (phase 2) key */
+               ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+               break;
+
+       case WLAN_CIPHER_SUITE_WEP104:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_WEP40:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+                       ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+                         TX_CMD_SEC_WEP_KEY_IDX_MSK);
+
+               /* WEP key material starts at offset 3 of the key array —
+                * presumably per the FW API layout; confirm vs fw-api docs.
+                */
+               memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+               break;
+       default:
+               IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher);
+               break;
+       }
+}
+
+/*
+ * Allocates and sets the Tx cmd the driver data pointers in the skb
+ *
+ * Allocates a device command from the transport, fills the crypto, generic
+ * and rate parts of the Tx cmd, and stashes the command pointer in the
+ * skb's driver_data[1] (used later to free it on Tx response / reclaim).
+ * Returns NULL if the transport has no command available.
+ */
+static struct iwl_device_cmd *
+iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+                     struct ieee80211_sta *sta, u8 sta_id)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_device_cmd *dev_cmd;
+       struct iwl_tx_cmd *tx_cmd;
+
+       dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
+
+       if (unlikely(!dev_cmd))
+               return NULL;
+
+       memset(dev_cmd, 0, sizeof(*dev_cmd));
+       tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+       /* crypto fields must be set before the generic ones (hw_key is
+        * read from info->control, which is clobbered below)
+        */
+       if (info->control.hw_key)
+               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+
+       iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
+
+       iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+
+       /* info->control and info->status share a union — from here on only
+        * status is valid
+        */
+       memset(&info->status, 0, sizeof(info->status));
+
+       info->driver_data[0] = NULL;
+       info->driver_data[1] = dev_cmd;
+
+       return dev_cmd;
+}
+
+/*
+ * Transmit a frame that is not bound to a known station entry.
+ * Uses the broadcast station of a P2P_DEVICE/AP vif when one is attached,
+ * otherwise the AUX station. Returns 0 on success, -1 on failure (the skb
+ * itself is not freed here on failure).
+ */
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_device_cmd *dev_cmd;
+       struct iwl_tx_cmd *tx_cmd;
+       u8 sta_id;
+
+       /* aggregation requires a station context — not supported here */
+       if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
+               return -1;
+
+       /* after-DTIM frames are only valid on the vif's CAB (multicast) queue */
+       if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+                        (!info->control.vif ||
+                         info->hw_queue != info->control.vif->cab_queue)))
+               return -1;
+
+       /*
+        * If the interface on which frame is sent is the P2P_DEVICE
+        * or an AP/GO interface use the broadcast station associated
+        * with it; otherwise use the AUX station.
+        */
+       if (info->control.vif &&
+           (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+            info->control.vif->type == NL80211_IFTYPE_AP)) {
+               struct iwl_mvm_vif *mvmvif =
+                       iwl_mvm_vif_from_mac80211(info->control.vif);
+               sta_id = mvmvif->bcast_sta.sta_id;
+       } else {
+               sta_id = mvm->aux_sta.sta_id;
+       }
+
+       IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
+
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+       if (!dev_cmd)
+               return -1;
+
+       /* From now on, we cannot access info->control */
+       tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+       if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
+               iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Transmit an skb to a known station.
+ *
+ * Builds the Tx cmd, assigns the QoS sequence number under the station
+ * lock, redirects A-MPDU frames to their aggregation queue, and hands the
+ * frame to the transport. Returns 0 on success, -1 if the frame is dropped
+ * (the skb itself is not freed here).
+ */
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+                  struct ieee80211_sta *sta)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_mvm_sta *mvmsta;
+       struct iwl_device_cmd *dev_cmd;
+       struct iwl_tx_cmd *tx_cmd;
+       __le16 fc;
+       u16 seq_number = 0;
+       u8 tid = IWL_MAX_TID_COUNT;
+       u8 txq_id = info->hw_queue;
+       bool is_data_qos = false, is_ampdu = false;
+
+       mvmsta = (void *)sta->drv_priv;
+       fc = hdr->frame_control;
+
+       if (WARN_ON_ONCE(!mvmsta))
+               return -1;
+
+       if (WARN_ON_ONCE(mvmsta->sta_id == IWL_INVALID_STATION))
+               return -1;
+
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+       if (!dev_cmd)
+               goto drop;
+
+       tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+       /* From now on, we cannot access info->control */
+
+       spin_lock(&mvmsta->lock);
+
+       if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+               u8 *qc = NULL;
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+                       goto drop_unlock_sta;
+
+               /* driver assigns the per-TID sequence number for QoS data */
+               seq_number = mvmsta->tid_data[tid].seq_number;
+               seq_number &= IEEE80211_SCTL_SEQ;
+               hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(seq_number);
+               seq_number += 0x10;
+               is_data_qos = true;
+               is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+       }
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+
+       WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+       if (is_ampdu) {
+               if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
+                       goto drop_unlock_sta;
+               txq_id = mvmsta->tid_data[tid].txq_id;
+       }
+
+       IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
+                    tid, txq_id, seq_number);
+
+       /* NOTE: aggregation will need changes here (for txq id) */
+       if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+               goto drop_unlock_sta;
+
+       /* only advance the sequence number on the final fragment */
+       if (is_data_qos && !ieee80211_has_morefrags(fc))
+               mvmsta->tid_data[tid].seq_number = seq_number;
+
+       spin_unlock(&mvmsta->lock);
+
+       /* track non-AMPDU AP frames so we know when the STA is drained */
+       if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
+           txq_id < IWL_FIRST_AMPDU_QUEUE)
+               atomic_inc(&mvmsta->pending_frames);
+
+       return 0;
+
+drop_unlock_sta:
+       iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+       spin_unlock(&mvmsta->lock);
+drop:
+       return -1;
+}
+
+/*
+ * Check whether the sta/tid queue has been fully reclaimed and, if so,
+ * continue a pending addBA or delBA flow for that TID.
+ *
+ * Must be called with mvmsta->lock held.
+ */
+static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
+                                     struct ieee80211_sta *sta, u8 tid)
+{
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+       struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+       struct ieee80211_vif *vif = mvmsta->vif;
+
+       lockdep_assert_held(&mvmsta->lock);
+
+       /* still frames pending reclaim — nothing to do yet */
+       if (tid_data->ssn != tid_data->next_reclaimed)
+               return;
+
+       switch (tid_data->state) {
+       case IWL_EMPTYING_HW_QUEUE_ADDBA:
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Can continue addBA flow ssn = next_recl = %d\n",
+                                   tid_data->next_reclaimed);
+               tid_data->state = IWL_AGG_STARTING;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+
+       case IWL_EMPTYING_HW_QUEUE_DELBA:
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Can continue DELBA flow ssn = next_recl = %d\n",
+                                   tid_data->next_reclaimed);
+               iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+               tid_data->state = IWL_AGG_OFF;
+               /*
+                * we can't hold the mutex - but since we are after a sequence
+                * point (call to iwl_trans_txq_disable), so we don't even need
+                * a memory barrier.
+                */
+               mvm->queue_to_mac80211[tid_data->txq_id] =
+                                       IWL_INVALID_MAC80211_QUEUE;
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+
+       default:
+               break;
+       }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * Map a Tx response status code to a human-readable name for debug logs.
+ * Returns "UNKNOWN" for unrecognized status values.
+ */
+const char *iwl_mvm_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+       switch (status & TX_STATUS_MSK) {
+       case TX_STATUS_SUCCESS:
+               return "SUCCESS";
+       TX_STATUS_POSTPONE(DELAY);
+       TX_STATUS_POSTPONE(FEW_BYTES);
+       TX_STATUS_POSTPONE(BT_PRIO);
+       TX_STATUS_POSTPONE(QUIET_PERIOD);
+       TX_STATUS_POSTPONE(CALC_TTAK);
+       TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+       TX_STATUS_FAIL(SHORT_LIMIT);
+       TX_STATUS_FAIL(LONG_LIMIT);
+       TX_STATUS_FAIL(UNDERRUN);
+       TX_STATUS_FAIL(DRAIN_FLOW);
+       TX_STATUS_FAIL(RFKILL_FLUSH);
+       TX_STATUS_FAIL(LIFE_EXPIRE);
+       TX_STATUS_FAIL(DEST_PS);
+       TX_STATUS_FAIL(HOST_ABORTED);
+       TX_STATUS_FAIL(BT_RETRY);
+       TX_STATUS_FAIL(STA_INVALID);
+       TX_STATUS_FAIL(FRAG_DROPPED);
+       TX_STATUS_FAIL(TID_DISABLE);
+       TX_STATUS_FAIL(FIFO_FLUSHED);
+       TX_STATUS_FAIL(SMALL_CF_POLL);
+       TX_STATUS_FAIL(FW_DROP);
+       TX_STATUS_FAIL(STA_COLOR_MISMATCH);
+       }
+
+       return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+/**
+ * translate ucode response to mac80211 tx status control values
+ *
+ * Decodes rate_n_flags (antenna, bandwidth, guard interval, HT/VHT/legacy
+ * rate index) into info->status.rates[0] and info->status.antenna.
+ */
+static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
+                                        struct ieee80211_tx_info *info)
+{
+       struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+       info->status.antenna =
+               ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+       if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+               r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+               break;
+       }
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               r->flags |= IEEE80211_TX_RC_SHORT_GI;
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               r->flags |= IEEE80211_TX_RC_MCS;
+               r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               /* VHT encodes MCS and NSS separately */
+               ieee80211_rate_set_vht(
+                       r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
+                       ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+                                               RATE_VHT_MCS_NSS_POS) + 1);
+               r->flags |= IEEE80211_TX_RC_VHT_MCS;
+       } else {
+               r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+                                                            info->band);
+       }
+}
+
+/*
+ * Handle a Tx response for a single (frame_count == 1) frame.
+ *
+ * Reclaims the transmitted skbs from the transport, translates the FW
+ * status to mac80211 flags, reports the frames, updates the per-TID
+ * reclaim pointer, and handles AP pending-frame draining.
+ */
+static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+                                    struct iwl_rx_packet *pkt)
+{
+       struct ieee80211_sta *sta;
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+       int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+       int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+       u32 status = le16_to_cpu(tx_resp->status.status);
+       u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+       struct iwl_mvm_sta *mvmsta;
+       struct sk_buff_head skbs;
+       u8 skb_freed = 0;
+       u16 next_reclaimed, seq_ctl;
+
+       __skb_queue_head_init(&skbs);
+
+       seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+       /* we can free until ssn % q.n_bd not inclusive */
+       iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
+
+       while (!skb_queue_empty(&skbs)) {
+               struct sk_buff *skb = __skb_dequeue(&skbs);
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+               skb_freed++;
+
+               /* driver_data[1] holds the dev_cmd set by tx_skb */
+               iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+               memset(&info->status, 0, sizeof(info->status));
+
+               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+               /* inform mac80211 about what happened with the frame */
+               switch (status & TX_STATUS_MSK) {
+               case TX_STATUS_SUCCESS:
+               case TX_STATUS_DIRECT_DONE:
+                       info->flags |= IEEE80211_TX_STAT_ACK;
+                       break;
+               case TX_STATUS_FAIL_DEST_PS:
+                       info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+                       break;
+               default:
+                       break;
+               }
+
+               info->status.rates[0].count = tx_resp->failure_frame + 1;
+               iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
+                                            info);
+
+               /* Single frame failure in an AMPDU queue => send BAR */
+               if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
+                   !(info->flags & IEEE80211_TX_STAT_ACK)) {
+                       /* there must be only one skb in the skb_list */
+                       WARN_ON_ONCE(skb_freed > 1 ||
+                                    !skb_queue_empty(&skbs));
+                       info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+               }
+
+               /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
+               if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+                       struct ieee80211_hdr *hdr = (void *)skb->data;
+                       seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+               }
+
+               ieee80211_tx_status_ni(mvm->hw, skb);
+       }
+
+       if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
+               /* If this is an aggregation queue, we use the ssn since:
+                * ssn = wifi seq_num % 256.
+                * The seq_ctl is the sequence control of the packet to which
+                * this Tx response relates. But if there is a hole in the
+                * bitmap of the BA we received, this Tx response may allow to
+                * reclaim the hole and all the subsequent packets that were
+                * already acked. In that case, seq_ctl != ssn, and the next
+                * packet to be reclaimed will be ssn and not seq_ctl. In that
+                * case, several packets will be reclaimed even if
+                * frame_count = 1.
+                *
+                * The ssn is the index (% 256) of the latest packet that has
+                * been treated (acked / dropped) + 1.
+                */
+               next_reclaimed = ssn;
+       } else {
+               /* The next packet to be reclaimed is the one after this one */
+               next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10);
+       }
+
+       IWL_DEBUG_TX_REPLY(mvm,
+                          "TXQ %d status %s (0x%08x)\n\t\t\t\tinitial_rate 0x%x "
+                           "retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
+                          txq_id, iwl_mvm_get_tx_fail_reason(status),
+                          status, le32_to_cpu(tx_resp->initial_rate),
+                          tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
+                          ssn, next_reclaimed, seq_ctl);
+
+       rcu_read_lock();
+
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+       if (!IS_ERR_OR_NULL(sta)) {
+               mvmsta = (void *)sta->drv_priv;
+
+               if (tid != IWL_TID_NON_QOS) {
+                       struct iwl_mvm_tid_data *tid_data =
+                               &mvmsta->tid_data[tid];
+
+                       spin_lock_bh(&mvmsta->lock);
+                       tid_data->next_reclaimed = next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
+                                          next_reclaimed);
+                       iwl_mvm_check_ratid_empty(mvm, sta, tid);
+                       spin_unlock_bh(&mvmsta->lock);
+               }
+
+#ifdef CONFIG_PM_SLEEP
+               mvmsta->last_seq_ctl = seq_ctl;
+#endif
+       } else {
+               /* station may have been removed while the frame was in flight */
+               sta = NULL;
+               mvmsta = NULL;
+       }
+
+       /*
+        * If the txq is not an AMPDU queue, there is no chance we freed
+        * several skbs. Check that out...
+        * If there are no pending frames for this STA, notify mac80211 that
+        * this station can go to sleep in its STA table.
+        */
+       if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta &&
+           !WARN_ON(skb_freed > 1) &&
+           mvmsta->vif->type == NL80211_IFTYPE_AP &&
+           atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
+               ieee80211_sta_block_awake(mvm->hw, sta, false);
+               set_bit(sta_id, mvm->sta_drained);
+               schedule_work(&mvm->sta_drained_wk);
+       }
+
+       rcu_read_unlock();
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
+/*
+ * Map an aggregation Tx frame status to a readable name for debug logs.
+ */
+static const char *iwl_get_agg_tx_status(u16 status)
+{
+       switch (status & AGG_TX_STATE_STATUS_MSK) {
+       AGG_TX_STATE_(TRANSMITTED);
+       AGG_TX_STATE_(UNDERRUN);
+       AGG_TX_STATE_(BT_PRIO);
+       AGG_TX_STATE_(FEW_BYTES);
+       AGG_TX_STATE_(ABORT);
+       AGG_TX_STATE_(LAST_SENT_TTL);
+       AGG_TX_STATE_(LAST_SENT_TRY_CNT);
+       AGG_TX_STATE_(LAST_SENT_BT_KILL);
+       AGG_TX_STATE_(SCD_QUERY);
+       AGG_TX_STATE_(TEST_BAD_CRC32);
+       AGG_TX_STATE_(RESPONSE);
+       AGG_TX_STATE_(DUMP_TX);
+       AGG_TX_STATE_(DELAY_TX);
+       }
+
+       return "UNKNOWN";
+}
+
+/*
+ * Dump the per-frame status array of an aggregation Tx response.
+ * Compiled out (empty stub below) when CONFIG_IWLWIFI_DEBUG is not set.
+ */
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+                                     struct iwl_rx_packet *pkt)
+{
+       struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+       struct agg_tx_status *frame_status = &tx_resp->status;
+       int i;
+
+       for (i = 0; i < tx_resp->frame_count; i++) {
+               u16 fstatus = le16_to_cpu(frame_status[i].status);
+
+               IWL_DEBUG_TX_REPLY(mvm,
+                                  "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
+                                  iwl_get_agg_tx_status(fstatus),
+                                  fstatus & AGG_TX_STATE_STATUS_MSK,
+                                  (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
+                                       AGG_TX_STATE_TRY_CNT_POS,
+                                  le16_to_cpu(frame_status[i].sequence));
+       }
+}
+#else
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+                                     struct iwl_rx_packet *pkt)
+{}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+/*
+ * Handle a Tx response for an aggregated (frame_count > 1) batch.
+ *
+ * Frames themselves are reclaimed on the BA notification; here we only
+ * record the rate used so iwl_mvm_rx_ba_notif() can report it later.
+ */
+static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
+                                 struct iwl_rx_packet *pkt)
+{
+       struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+       int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+       int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       struct ieee80211_sta *sta;
+
+       /* aggregation responses must come from an AMPDU queue */
+       if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_FIRST_AMPDU_QUEUE))
+               return;
+
+       if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
+               return;
+
+       iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
+
+       rcu_read_lock();
+
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+       if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+               struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+               /* remembered for rate reporting in the BA notification */
+               mvmsta->tid_data[tid].rate_n_flags =
+                       le32_to_cpu(tx_resp->initial_rate);
+       }
+
+       rcu_read_unlock();
+}
+
+/*
+ * RX handler for Tx responses: dispatch to the single-frame or the
+ * aggregation path based on frame_count. Always returns 0.
+ */
+int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                     struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+
+       if (tx_resp->frame_count == 1)
+               iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+       else
+               iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+
+       return 0;
+}
+
+/*
+ * RX handler for block-ack notifications.
+ *
+ * Reclaims from the transport all frames in front of the BA window start
+ * (scd_ssn), attaches rate/ampdu status to the first reclaimed frame, and
+ * reports all of them to mac80211. Always returns 0.
+ */
+int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                       struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
+       struct sk_buff_head reclaimed_skbs;
+       struct iwl_mvm_tid_data *tid_data;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb;
+       int sta_id, tid, freed;
+
+       /* "flow" corresponds to Tx queue */
+       u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
+
+       /* "ssn" is start of block-ack Tx window, corresponds to index
+        * (in Tx queue's circular buffer) of first TFD/frame in window */
+       u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
+
+       sta_id = ba_notif->sta_id;
+       tid = ba_notif->tid;
+
+       rcu_read_lock();
+
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+       /* Reclaiming frames for a station that has been deleted ? */
+       if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+               rcu_read_unlock();
+               return 0;
+       }
+
+       mvmsta = (void *)sta->drv_priv;
+       tid_data = &mvmsta->tid_data[tid];
+
+       /* the notified queue must match the one assigned to this tid */
+       if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
+                     tid_data->txq_id, tid, scd_flow)) {
+               rcu_read_unlock();
+               return 0;
+       }
+
+       spin_lock_bh(&mvmsta->lock);
+
+       __skb_queue_head_init(&reclaimed_skbs);
+
+       /*
+        * Release all TFDs before the SSN, i.e. all TFDs in front of
+        * block-ack window (we assume that they've been successfully
+        * transmitted ... if not, it's too late anyway).
+        */
+       iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
+                         &reclaimed_skbs);
+
+       IWL_DEBUG_TX_REPLY(mvm,
+                          "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+                          (u8 *)&ba_notif->sta_addr_lo32,
+                          ba_notif->sta_id);
+       IWL_DEBUG_TX_REPLY(mvm,
+                          "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+                          ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+                          (unsigned long long)le64_to_cpu(ba_notif->bitmap),
+                          scd_flow, ba_resp_scd_ssn, ba_notif->txed,
+                          ba_notif->txed_2_done);
+
+       tid_data->next_reclaimed = ba_resp_scd_ssn;
+
+       iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+       freed = 0;
+
+       skb_queue_walk(&reclaimed_skbs, skb) {
+               hdr = (struct ieee80211_hdr *)skb->data;
+
+               /* only QoS data frames are expected on an agg queue */
+               if (ieee80211_is_data_qos(hdr->frame_control))
+                       freed++;
+               else
+                       WARN_ON_ONCE(1);
+
+               info = IEEE80211_SKB_CB(skb);
+               iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+               if (freed == 1) {
+                       /* this is the first skb we deliver in this batch */
+                       /* put the rate scaling data there */
+                       info = IEEE80211_SKB_CB(skb);
+                       memset(&info->status, 0, sizeof(info->status));
+                       info->flags |= IEEE80211_TX_STAT_ACK;
+                       info->flags |= IEEE80211_TX_STAT_AMPDU;
+                       info->status.ampdu_ack_len = ba_notif->txed_2_done;
+                       info->status.ampdu_len = ba_notif->txed;
+                       iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
+                                                    info);
+               }
+       }
+
+       spin_unlock_bh(&mvmsta->lock);
+
+       rcu_read_unlock();
+
+       /* report outside the locks — mac80211 may re-enter the driver */
+       while (!skb_queue_empty(&reclaimed_skbs)) {
+               skb = __skb_dequeue(&reclaimed_skbs);
+               ieee80211_tx_status_ni(mvm->hw, skb);
+       }
+
+       return 0;
+}
+
+/*
+ * Send a TXPATH_FLUSH command for the Tx queues selected by tfd_msk.
+ * @sync: true sends the command synchronously (CMD_SYNC), false async.
+ * Returns 0 on success or the error from sending the host command.
+ */
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+{
+       int ret;
+       struct iwl_tx_path_flush_cmd flush_cmd = {
+               .queues_ctl = cpu_to_le32(tfd_msk),
+               .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
+       };
+
+       u32 flags = sync ? CMD_SYNC : CMD_ASYNC;
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+                                  sizeof(flush_cmd), &flush_cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
new file mode 100644 (file)
index 0000000..000e842
--- /dev/null
@@ -0,0 +1,472 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+
+#include "mvm.h"
+#include "fw-api-rs.h"
+
+/*
+ * Will return 0 even if the cmd failed when RFKILL is asserted unless
+ * CMD_WANT_SKB is set in cmd->flags.
+ */
+int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
+{
+       int ret;
+
+       /*
+        * Synchronous commands from this op-mode must hold
+        * the mutex, this ensures we don't try to send two
+        * (or more) synchronous commands at a time.
+        */
+       if (!(cmd->flags & CMD_ASYNC))
+               lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_trans_send_cmd(mvm->trans, cmd);
+
+       /*
+        * If the caller wants the SKB, then don't hide any problems, the
+        * caller might access the response buffer which will be NULL if
+        * the command failed.
+        */
+       if (cmd->flags & CMD_WANT_SKB)
+               return ret;
+
+       /* Silently ignore failures if RFKILL is asserted */
+       if (!ret || ret == -ERFKILL)
+               return 0;
+       return ret;
+}
+
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+                        u32 flags, u16 len, const void *data)
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = { len, },
+               .data = { data, },
+               .flags = flags,
+       };
+
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
+                           u32 *status)
+{
+       struct iwl_rx_packet *pkt;
+       struct iwl_cmd_response *resp;
+       int ret, resp_len;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /*
+        * Only synchronous commands can wait for status,
+        * we use WANT_SKB so the caller can't.
+        */
+       if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
+                     "cmd flags %x", cmd->flags))
+               return -EINVAL;
+
+       cmd->flags |= CMD_SYNC | CMD_WANT_SKB;
+
+       ret = iwl_trans_send_cmd(mvm->trans, cmd);
+       if (ret == -ERFKILL) {
+               /*
+                * The command failed because of RFKILL, don't update
+                * the status, leave it as success and return 0.
+                */
+               return 0;
+       } else if (ret) {
+               return ret;
+       }
+
+       pkt = cmd->resp_pkt;
+       /* Can happen if RFKILL is asserted */
+       if (!pkt) {
+               ret = 0;
+               goto out_free_resp;
+       }
+
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               ret = -EIO;
+               goto out_free_resp;
+       }
+
+       resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+               ret = -EIO;
+               goto out_free_resp;
+       }
+
+       resp = (void *)pkt->data;
+       *status = le32_to_cpu(resp->status);
+ out_free_resp:
+       iwl_free_resp(cmd);
+       return ret;
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+                               const void *data, u32 *status)
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = { len, },
+               .data = { data, },
+       };
+
+       return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+}
+
+#define IWL_DECLARE_RATE_INFO(r) \
+       [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+       IWL_DECLARE_RATE_INFO(1),
+       IWL_DECLARE_RATE_INFO(2),
+       IWL_DECLARE_RATE_INFO(5),
+       IWL_DECLARE_RATE_INFO(11),
+       IWL_DECLARE_RATE_INFO(6),
+       IWL_DECLARE_RATE_INFO(9),
+       IWL_DECLARE_RATE_INFO(12),
+       IWL_DECLARE_RATE_INFO(18),
+       IWL_DECLARE_RATE_INFO(24),
+       IWL_DECLARE_RATE_INFO(36),
+       IWL_DECLARE_RATE_INFO(48),
+       IWL_DECLARE_RATE_INFO(54),
+};
+
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+                                       enum ieee80211_band band)
+{
+       int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+       int idx;
+       int band_offset = 0;
+
+       /* Legacy rate format, search for match in table */
+       if (band == IEEE80211_BAND_5GHZ)
+               band_offset = IWL_FIRST_OFDM_RATE;
+       for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+               if (fw_rate_idx_to_plcp[idx] == rate)
+                       return idx - band_offset;
+
+       return -1;
+}
+
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
+{
+       /* Get PLCP rate for tx_cmd->rate_n_flags */
+       return fw_rate_idx_to_plcp[rate_idx];
+}
+
+int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+       IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+               le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+       IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+               le16_to_cpu(err_resp->bad_cmd_seq_num),
+               le32_to_cpu(err_resp->error_service));
+       IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+               le64_to_cpu(err_resp->timestamp));
+       return 0;
+}
+
+/*
+ * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
+ * The parameter should also be a combination of ANT_[ABC].
+ */
+u8 first_antenna(u8 mask)
+{
+       BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+       WARN_ON_ONCE(!mask); /* ffs will return 0 if mask is zeroed */
+       return (u8)(BIT(ffs(mask) - 1)); /* ffs() is 1-based, BIT() is 0-based */
+}
+
+/*
+ * Toggles between TX antennas to send the probe request on.
+ * Receives the bitmask of valid TX antennas and the *index* used
+ * for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, must do BIT(idx).
+ */
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
+{
+       u8 ind = last_idx;
+       int i;
+
+       for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
+               ind = (ind + 1) % RATE_MCS_ANT_NUM;
+               if (valid & BIT(ind))
+                       return ind;
+       }
+
+       WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+       return last_idx;
+}
+
+static struct {
+       char *name;
+       u8 num;
+} advanced_lookup[] = {
+       { "NMI_INTERRUPT_WDG", 0x34 },
+       { "SYSASSERT", 0x35 },
+       { "UCODE_VERSION_MISMATCH", 0x37 },
+       { "BAD_COMMAND", 0x38 },
+       { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+       { "FATAL_ERROR", 0x3D },
+       { "NMI_TRM_HW_ERR", 0x46 },
+       { "NMI_INTERRUPT_TRM", 0x4C },
+       { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+       { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+       { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+       { "NMI_INTERRUPT_HOST", 0x66 },
+       { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+       { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+       { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+       { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+               if (advanced_lookup[i].num == num)
+                       return advanced_lookup[i].name;
+
+       /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+       return advanced_lookup[i].name;
+}
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 pc;                 /* program counter */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 gp3;                /* GP3 timer register */
+       u32 ucode_ver;          /* uCode version */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* timestamp of the uCode compilation
+                                * date and time */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed;
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+       struct iwl_trans *trans = mvm->trans;
+       struct iwl_error_event_table table;
+       u32 base;
+
+       base = mvm->error_event_table;
+       if (mvm->cur_ucode == IWL_UCODE_INIT) {
+               if (!base)
+                       base = mvm->fw->init_errlog_ptr;
+       } else {
+               if (!base)
+                       base = mvm->fw->inst_errlog_ptr;
+       }
+
+       if (base < 0x800000 || base >= 0x80C000) {
+               IWL_ERR(mvm,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base,
+                       (mvm->cur_ucode == IWL_UCODE_INIT)
+                                       ? "Init" : "RT");
+               return;
+       }
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+                       mvm->status, table.valid);
+       }
+
+       trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+                                     table.data1, table.data2, table.data3,
+                                     table.blink1, table.blink2, table.ilink1,
+                                     table.ilink2, table.bcon_time, table.gp1,
+                                     table.gp2, table.gp3, table.ucode_ver,
+                                     table.hw_ver, table.brd_ver);
+       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+               desc_lookup(table.error_id));
+       IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+       IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+       IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+       IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
+       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+       IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/**
+ * iwl_mvm_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we call a callback in
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+                       u8 flags, bool init)
+{
+       struct iwl_host_cmd cmd = {
+               .id = LQ_CMD,
+               .len = { sizeof(struct iwl_lq_cmd), },
+               .flags = flags,
+               .data = { lq, },
+       };
+
+       if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+               return -EINVAL;
+
+       if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+               return -EINVAL;
+
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
index f8620ec..ff33897 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index 244019c..e7de331 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index 83ca403..5096f7c 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index d4df976..801ff49 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/pcie/7000.c
new file mode 100644 (file)
index 0000000..6e35b2b
--- /dev/null
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+
+/* Highest firmware API version supported */
+#define IWL7260_UCODE_API_MAX  6
+#define IWL3160_UCODE_API_MAX  6
+
+/* Oldest version we won't warn about */
+#define IWL7260_UCODE_API_OK   6
+#define IWL3160_UCODE_API_OK   6
+
+/* Lowest firmware API version supported */
+#define IWL7260_UCODE_API_MIN  6
+#define IWL3160_UCODE_API_MIN  6
+
+/* NVM versions */
+#define IWL7260_NVM_VERSION            0x0a1d
+#define IWL7260_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL3160_NVM_VERSION            0x709
+#define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
+
+#define IWL7260_FW_PRE "iwlwifi-7260-"
+#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
+
+#define IWL3160_FW_PRE "iwlwifi-3160-"
+#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl7000_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .num_of_queues = IWLAGN_NUM_QUEUES,
+       .pll_cfg_val = 0,
+       .shadow_ram_support = true,
+       .led_compensation = 57,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .wd_timeout = IWL_LONG_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_ht_params iwl7000_ht_params = {
+       .ht_greenfield_support = true,
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
+       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_7000                                                \
+       .ucode_api_max = IWL7260_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL7260_UCODE_API_OK,                   \
+       .ucode_api_min = IWL7260_UCODE_API_MIN,                 \
+       .device_family = IWL_DEVICE_FAMILY_7000,                \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
+       .base_params = &iwl7000_base_params,                    \
+       /* TODO: .bt_params? */                                 \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true                                          \
+
+
+const struct iwl_cfg iwl7260_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC7260",
+       .fw_name_pre = IWL7260_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL7260_NVM_VERSION,
+       .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC3160",
+       .fw_name_pre = IWL3160_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL3160_NVM_VERSION,
+       .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
index 8215231..c6f8e83 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -109,5 +109,7 @@ extern const struct iwl_cfg iwl6035_2agn_cfg;
 extern const struct iwl_cfg iwl105_bgn_cfg;
 extern const struct iwl_cfg iwl105_bgn_d_cfg;
 extern const struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl3160_ac_cfg;
 
 #endif /* __iwl_pci_h__ */
index c2e141a..7bc0fb9 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -255,6 +255,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
 
+/* 7000 Series */
+       {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
+
        {0}
 };
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
index 20735a0..aa2a39a 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -235,6 +235,7 @@ struct iwl_txq {
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @rx_page_order: page order for receive buffer size
  * @wd_timeout: queue watchdog timeout (jiffies)
+ * @reg_lock: protect hw register access
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
@@ -248,7 +249,6 @@ struct iwl_trans_pcie {
        int ict_index;
        u32 inta;
        bool use_ict;
-       struct tasklet_struct irq_tasklet;
        struct isr_statistics isr_stats;
 
        spinlock_t irq_lock;
@@ -283,6 +283,9 @@ struct iwl_trans_pcie {
 
        /* queue watchdog */
        unsigned long wd_timeout;
+
+       /*protect hw register */
+       spinlock_t reg_lock;
 };
 
 /**
@@ -326,7 +329,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 * RX
 ******************************************************/
 int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
 int iwl_pcie_rx_stop(struct iwl_trans *trans);
 void iwl_pcie_rx_free(struct iwl_trans *trans);
 
index 4e6591d..b0ae06d 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * Driver sequence:
@@ -214,9 +214,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        /*
         * If the device isn't enabled - not need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
-        * pending. We stop the APM before we sync the interrupts / tasklets
-        * because we have to (see comment there). On the other hand, since
-        * the APM is stopped, we cannot access the HW (in particular not prph).
+        * pending. We stop the APM before we sync the interrupts because we
+        * have to (see comment there). On the other hand, since the APM is
+        * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
@@ -594,6 +594,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                int index, cmd_index, err, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
+                       ._rx_page_order = trans_pcie->rx_page_order,
                        ._page = rxb->page,
                        ._page_stolen = false,
                        .truesize = max_len,
@@ -795,11 +796,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
        wake_up(&trans_pcie->wait_command_queue);
 
+       local_bh_disable();
        iwl_op_mode_nic_error(trans->op_mode);
+       local_bh_enable();
 }
 
-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 {
+       struct iwl_trans *trans = dev_id;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
@@ -810,6 +814,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
        u32 inta_mask;
 #endif
 
+       lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
 
        /* Ack/clear/reset pending uCode interrupts.
@@ -854,7 +860,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
 
                handled |= CSR_INT_BIT_HW_ERR;
 
-               return;
+               goto out;
        }
 
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -1004,6 +1010,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);
+
+out:
+       lock_map_release(&trans->sync_cmd_lockdep_map);
+       return IRQ_HANDLED;
 }
 
 /******************************************************************************
@@ -1126,7 +1136,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 
        /* Disable (but don't clear!) interrupts here to avoid
         *    back-to-back ISRs and sporadic interrupts from our NIC.
-        * If we have something to service, the tasklet will re-enable ints.
+        * If we have something to service, the irq thread will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -1166,9 +1176,9 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 #endif
 
        trans_pcie->inta |= inta;
-       /* iwl_pcie_tasklet() will service interrupts and re-enable them */
+       /* the thread will service interrupts and re-enable them */
        if (likely(inta))
-               tasklet_schedule(&trans_pcie->irq_tasklet);
+               return IRQ_WAKE_THREAD;
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);
@@ -1276,9 +1286,10 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
        trans_pcie->inta |= inta;
 
        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
-       if (likely(inta))
-               tasklet_schedule(&trans_pcie->irq_tasklet);
-       else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+       if (likely(inta)) {
+               spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+               return IRQ_WAKE_THREAD;
+       } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta) {
                /* Allow interrupt if was disabled by this handler and
                 * no tasklet was schedules, We should not enable interrupt,
index c57641e..17bedc5 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "iwl-agn-hw.h"
 #include "internal.h"
 
+static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+                                                 u32 reg, u32 mask, u32 value)
+{
+       u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+       WARN_ON_ONCE(value & ~mask);
+#endif
+
+       v = iwl_read32(trans, reg);
+       v &= ~mask;
+       v |= value;
+       iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+                                             u32 reg, u32 mask)
+{
+       __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+                                           u32 reg, u32 mask)
+{
+       __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -733,7 +760,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        synchronize_irq(trans_pcie->pci_dev->irq);
-       tasklet_kill(&trans_pcie->irq_tasklet);
 
        iwl_pcie_tx_free(trans);
        iwl_pcie_rx_free(trans);
@@ -779,15 +805,16 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
+                                               unsigned long *flags)
 {
        int ret;
-
-       lockdep_assert_held(&trans->reg_lock);
+       struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+       spin_lock_irqsave(&pcie_trans->reg_lock, *flags);
 
        /* this bit wakes up the NIC */
-       __iwl_set_bit(trans, CSR_GP_CNTRL,
-                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+                                CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 
        /*
         * These bits say the device is running, and should keep running for
@@ -819,18 +846,34 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
                        WARN_ONCE(1,
                                  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
                                  val);
+                       spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
                        return false;
                }
        }
 
+       /*
+        * Fool sparse by faking we release the lock - sparse will
+        * track nic_access anyway.
+        */
+       __release(&pcie_trans->reg_lock);
        return true;
 }
 
-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+                                             unsigned long *flags)
 {
-       lockdep_assert_held(&trans->reg_lock);
-       __iwl_clear_bit(trans, CSR_GP_CNTRL,
-                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&pcie_trans->reg_lock);
+
+       /*
+        * Fool sparse by faking we acquiring the lock - sparse will
+        * track nic_access anyway.
+        */
+       __acquire(&pcie_trans->reg_lock);
+
+       __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        /*
         * Above we read the CSR_GP_CNTRL register, which will flush
         * any previous writes, but we need the write that clears the
@@ -838,6 +881,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
         * scheduled on different CPUs (after we drop reg_lock).
         */
        mmiowb();
+       spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
 }
 
 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
@@ -847,16 +891,14 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
        int offs, ret = 0;
        u32 *vals = buf;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        } else {
                ret = -EBUSY;
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
        return ret;
 }
 
@@ -867,17 +909,15 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
        int offs, ret = 0;
        u32 *vals = buf;
 
-       spin_lock_irqsave(&trans->reg_lock, flags);
-       if (iwl_trans_grab_nic_access(trans, false)) {
+       if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
                                    vals ? vals[offs] : 0);
-               iwl_trans_release_nic_access(trans);
+               iwl_trans_release_nic_access(trans, &flags);
        } else {
                ret = -EBUSY;
        }
-       spin_unlock_irqrestore(&trans->reg_lock, flags);
        return ret;
 }
 
@@ -952,6 +992,17 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
        return ret;
 }
 
+static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+                                        u32 mask, u32 value)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       unsigned long flags;
+
+       spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+       __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+       spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+}
+
 static const char *get_fh_string(int cmd)
 {
 #define IWL_CMD(x) case x: return #x
@@ -1405,7 +1456,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .configure = iwl_trans_pcie_configure,
        .set_pmi = iwl_trans_pcie_set_pmi,
        .grab_nic_access = iwl_trans_pcie_grab_nic_access,
-       .release_nic_access = iwl_trans_pcie_release_nic_access
+       .release_nic_access = iwl_trans_pcie_release_nic_access,
+       .set_bits_mask = iwl_trans_pcie_set_bits_mask,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1427,8 +1479,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        trans->ops = &trans_ops_pcie;
        trans->cfg = cfg;
+       trans_lockdep_init(trans);
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
+       spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
        /* W/A - seems to solve weird behavior. We need to remove this if we
@@ -1495,7 +1549,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
-       spin_lock_init(&trans->reg_lock);
 
        snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
@@ -1514,15 +1567,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        trans_pcie->inta_mask = CSR_INI_SET_MASK;
 
-       tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-                    iwl_pcie_tasklet, (unsigned long)trans);
-
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
-       err = request_irq(pdev->irq, iwl_pcie_isr_ict,
-                         IRQF_SHARED, DRV_NAME, trans);
-       if (err) {
+       if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+                                iwl_pcie_irq_handler,
+                                IRQF_SHARED, DRV_NAME, trans)) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }
index a93f067..8e9e321 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -926,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
        if (WARN_ON(txq_id == trans_pcie->cmd_queue))
                return;
 
-       spin_lock(&txq->lock);
+       spin_lock_bh(&txq->lock);
 
        if (txq->q.read_ptr == tfd_num)
                goto out;
@@ -970,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
        if (iwl_queue_space(&txq->q) > txq->q.low_mark)
                iwl_wake_queue(trans, txq);
 out:
-       spin_unlock(&txq->lock);
+       spin_unlock_bh(&txq->lock);
 }
 
 /*
@@ -1371,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                return;
        }
 
-       spin_lock(&txq->lock);
+       spin_lock_bh(&txq->lock);
 
        cmd_index = get_cmd_index(&txq->q, index);
        cmd = txq->entries[cmd_index].cmd;
@@ -1405,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 
        meta->flags = 0;
 
-       spin_unlock(&txq->lock);
+       spin_unlock_bh(&txq->lock);
 }
 
 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
index a7dcb2e..116f4ab 100644 (file)
@@ -657,7 +657,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
                                        capa, intvl, ie, ielen,
                                        LBS_SCAN_RSSI_TO_MBM(rssi),
                                        GFP_KERNEL);
-                               cfg80211_put_bss(bss);
+                               cfg80211_put_bss(wiphy, bss);
                        }
                } else
                        lbs_deb_scan("scan response: missing BSS channel IE\n");
@@ -1444,7 +1444,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
 
  done:
        if (bss)
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wiphy, bss);
        lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
        return ret;
 }
@@ -1766,7 +1766,7 @@ static void lbs_join_post(struct lbs_private *priv,
                                  params->beacon_interval,
                                  fake_ie, fake - fake_ie,
                                  0, GFP_KERNEL);
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(priv->wdev->wiphy, bss);
 
        memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
        priv->wdev->ssid_len = params->ssid_len;
@@ -2011,7 +2011,7 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
 
        if (bss) {
                ret = lbs_ibss_join_existing(priv, params, bss);
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wiphy, bss);
        } else
                ret = lbs_ibss_start_new(priv, params);
 
index b73e497..cffdf4f 100644 (file)
@@ -2247,6 +2247,7 @@ static int __init init_mac80211_hwsim(void)
                /* ask mac80211 to reserve space for magic */
                hw->vif_data_size = sizeof(struct hwsim_vif_priv);
                hw->sta_data_size = sizeof(struct hwsim_sta_priv);
+               hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
 
                memcpy(data->channels_2ghz, hwsim_channels_2ghz,
                        sizeof(hwsim_channels_2ghz));
index 7b0ae24..25596ab 100644 (file)
@@ -400,45 +400,6 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
 }
 
 /*
- * This function reconfigures the Tx buffer size in firmware.
- *
- * This function prepares a firmware command and issues it, if
- * the current Tx buffer size is different from the one requested.
- * Maximum configurable Tx buffer size is limited by the HT capability
- * field value.
- */
-void
-mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
-                  struct mwifiex_bssdescriptor *bss_desc)
-{
-       u16 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_2K;
-       u16 tx_buf, curr_tx_buf_size = 0;
-
-       if (bss_desc->bcn_ht_cap) {
-               if (le16_to_cpu(bss_desc->bcn_ht_cap->cap_info) &
-                               IEEE80211_HT_CAP_MAX_AMSDU)
-                       max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_8K;
-               else
-                       max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K;
-       }
-
-       tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu);
-
-       dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n",
-               max_amsdu, priv->adapter->max_tx_buf_size);
-
-       if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K)
-               curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
-       else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_4K)
-               curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
-       else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_8K)
-               curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_8K;
-       if (curr_tx_buf_size != tx_buf)
-               mwifiex_send_cmd_async(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
-                                      HostCmd_ACT_GEN_SET, 0, &tx_buf);
-}
-
-/*
  * This function checks if the given pointer is valid entry of
  * Tx BA Stream table.
  */
index 46006a5..29a4c02 100644 (file)
@@ -34,8 +34,6 @@ int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
 int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
                               struct mwifiex_bssdescriptor *bss_desc,
                               u8 **buffer);
-void mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
-                       struct mwifiex_bssdescriptor *bss_desc);
 void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
                           struct mwifiex_ie_types_htcap *);
 int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
index b2e2772..4f614aa 100644 (file)
@@ -20,12 +20,12 @@ config MWIFIEX_SDIO
          mwifiex_sdio.
 
 config MWIFIEX_PCIE
-       tristate "Marvell WiFi-Ex Driver for PCIE 8766"
+       tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
        depends on MWIFIEX && PCI
        select FW_LOADER
        ---help---
          This adds support for wireless adapters based on Marvell
-         8766 chipset with PCIe interface.
+         8766/8897 chipsets with PCIe interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_pcie.
index b55bade..3d64613 100644 (file)
@@ -121,7 +121,6 @@ info
        wmm_ac_vi = <number of packets sent to device from WMM AcVi queue>
        wmm_ac_be = <number of packets sent to device from WMM AcBE queue>
        wmm_ac_bk = <number of packets sent to device from WMM AcBK queue>
-       max_tx_buf_size = <maximum Tx buffer size>
        tx_buf_size = <current Tx buffer size>
        curr_tx_buf_size = <current Tx buffer size>
        ps_mode = <0/1, CAM mode/PS mode>
index 8ba4819..dc5357c 100644 (file)
@@ -1430,7 +1430,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
        bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
                                  bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
                                  0, ie_buf, ie_len, 0, GFP_KERNEL);
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(priv->wdev->wiphy, bss);
        memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
 
        return 0;
index 46e34aa..753b568 100644 (file)
@@ -58,8 +58,6 @@ static struct mwifiex_debug_data items[] = {
         item_addr(packets_out[WMM_AC_BE]), 1},
        {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
         item_addr(packets_out[WMM_AC_BK]), 1},
-       {"max_tx_buf_size", item_size(max_tx_buf_size),
-        item_addr(max_tx_buf_size), 1},
        {"tx_buf_size", item_size(tx_buf_size),
         item_addr(tx_buf_size), 1},
        {"curr_tx_buf_size", item_size(curr_tx_buf_size),
index 84848c3..e38aa9b 100644 (file)
@@ -314,7 +314,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 
        adapter->pm_wakeup_fw_try = false;
 
-       adapter->max_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
        adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
        adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
 
index 6095b3e..f3d9d04 100644 (file)
@@ -178,7 +178,6 @@ struct mwifiex_ds_tx_ba_stream_tbl {
 struct mwifiex_debug_info {
        u32 int_counter;
        u32 packets_out[MAX_NUM_TID];
-       u32 max_tx_buf_size;
        u32 tx_buf_size;
        u32 curr_tx_buf_size;
        u32 tx_tbl_num;
index 893d809..a537297 100644 (file)
@@ -157,8 +157,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
 
        memset(rate1, 0, rate1_size);
 
-       for (i = 0; rate2[i] && i < rate2_size; i++) {
-               for (j = 0; tmp[j] && j < rate1_size; j++) {
+       for (i = 0; i < rate2_size && rate2[i]; i++) {
+               for (j = 0; j < rate1_size && tmp[j]; j++) {
                        /* Check common rate, excluding the bit for
                           basic rate */
                        if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) {
@@ -398,8 +398,6 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
 
        pos = (u8 *) assoc;
 
-       mwifiex_cfg_tx_buf(priv, bss_desc);
-
        cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);
 
        /* Save so we know which BSS Desc to use in the response handler */
index 51044e3..ac799a0 100644 (file)
@@ -631,7 +631,6 @@ struct mwifiex_adapter {
        /* spin lock for main process */
        spinlock_t main_proc_lock;
        u32 mwifiex_processing;
-       u16 max_tx_buf_size;
        u16 tx_buf_size;
        u16 curr_tx_buf_size;
        u32 ioport;
index 237949c..492655c 100644 (file)
@@ -62,6 +62,10 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
 {
        u32 *cookie_addr;
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       if (!reg->sleep_cookie)
+               return true;
 
        if (card->sleep_cookie_vbase) {
                cookie_addr = (u32 *)card->sleep_cookie_vbase;
@@ -94,6 +98,13 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
 
        card->dev = pdev;
 
+       if (ent->driver_data) {
+               struct mwifiex_pcie_device *data = (void *)ent->driver_data;
+               card->pcie.firmware = data->firmware;
+               card->pcie.reg = data->reg;
+               card->pcie.blksz_fw_dl = data->blksz_fw_dl;
+       }
+
        if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
                             MWIFIEX_PCIE)) {
                pr_err("%s failed\n", __func__);
@@ -230,13 +241,16 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#define PCIE_VENDOR_ID_MARVELL              (0x11ab)
-#define PCIE_DEVICE_ID_MARVELL_88W8766P                (0x2b30)
-
 static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
        {
                PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               .driver_data = (unsigned long) &mwifiex_pcie8766,
+       },
+       {
+               PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               .driver_data = (unsigned long) &mwifiex_pcie8897,
        },
        {},
 };
@@ -289,8 +303,10 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
 static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
        int i = 0;
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       while (mwifiex_pcie_ok_to_access_hw(adapter)) {
+       while (reg->sleep_cookie && mwifiex_pcie_ok_to_access_hw(adapter)) {
                i++;
                usleep_range(10, 20);
                /* 50ms max wait */
@@ -364,25 +380,268 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
 }
 
 /*
- * This function creates buffer descriptor ring for TX
+ * This function initializes TX buffer ring descriptors
  */
-static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+static int mwifiex_init_txq_ring(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
+       int i;
+
+       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+               card->tx_buf_list[i] = NULL;
+               if (reg->pfu_enabled) {
+                       card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
+                                            (sizeof(*desc2) * i);
+                       desc2 = card->txbd_ring[i];
+                       memset(desc2, 0, sizeof(*desc2));
+               } else {
+                       card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
+                                            (sizeof(*desc) * i);
+                       desc = card->txbd_ring[i];
+                       memset(desc, 0, sizeof(*desc));
+               }
+       }
+
+       return 0;
+}
+
+/* This function initializes RX buffer ring descriptors. Each SKB is allocated
+ * here and after mapping PCI memory, its physical address is assigned to
+ * PCIE Rx buffer descriptor's physical address.
+ */
+static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       struct sk_buff *skb;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
+       dma_addr_t buf_pa;
+       int i;
+
+       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+               /* Allocate skb here so that firmware can DMA data from it */
+               skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+               if (!skb) {
+                       dev_err(adapter->dev,
+                               "Unable to allocate skb for RX ring.\n");
+                       kfree(card->rxbd_ring_vbase);
+                       return -ENOMEM;
+               }
+
+               if (mwifiex_map_pci_memory(adapter, skb,
+                                          MWIFIEX_RX_DATA_BUF_SIZE,
+                                          PCI_DMA_FROMDEVICE))
+                       return -1;
+
+               MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+               dev_dbg(adapter->dev,
+                       "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+                       skb, skb->len, skb->data, (u32)buf_pa,
+                       (u32)((u64)buf_pa >> 32));
+
+               card->rx_buf_list[i] = skb;
+               if (reg->pfu_enabled) {
+                       card->rxbd_ring[i] = (void *)card->rxbd_ring_vbase +
+                                            (sizeof(*desc2) * i);
+                       desc2 = card->rxbd_ring[i];
+                       desc2->paddr = buf_pa;
+                       desc2->len = (u16)skb->len;
+                       desc2->frag_len = (u16)skb->len;
+                       desc2->flags = reg->ring_flag_eop | reg->ring_flag_sop;
+                       desc2->offset = 0;
+               } else {
+                       card->rxbd_ring[i] = (void *)(card->rxbd_ring_vbase +
+                                            (sizeof(*desc) * i));
+                       desc = card->rxbd_ring[i];
+                       desc->paddr = buf_pa;
+                       desc->len = (u16)skb->len;
+                       desc->flags = 0;
+               }
+       }
+
+       return 0;
+}
+
+/* This function initializes event buffer ring descriptors. Each SKB is
+ * allocated here and after mapping PCI memory, its physical address is assigned
+ * to PCIE Rx buffer descriptor's physical address
+ */
+static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       struct mwifiex_evt_buf_desc *desc;
+       struct sk_buff *skb;
+       dma_addr_t buf_pa;
        int i;
 
+       for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+               /* Allocate skb here so that firmware can DMA data from it */
+               skb = dev_alloc_skb(MAX_EVENT_SIZE);
+               if (!skb) {
+                       dev_err(adapter->dev,
+                               "Unable to allocate skb for EVENT buf.\n");
+                       kfree(card->evtbd_ring_vbase);
+                       return -ENOMEM;
+               }
+               skb_put(skb, MAX_EVENT_SIZE);
+
+               if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
+                                          PCI_DMA_FROMDEVICE))
+                       return -1;
+
+               MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+               dev_dbg(adapter->dev,
+                       "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+                       skb, skb->len, skb->data, (u32)buf_pa,
+                       (u32)((u64)buf_pa >> 32));
+
+               card->evt_buf_list[i] = skb;
+               card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
+                                     (sizeof(*desc) * i));
+               desc = card->evtbd_ring[i];
+               desc->paddr = buf_pa;
+               desc->len = (u16)skb->len;
+               desc->flags = 0;
+       }
+
+       return 0;
+}
+
+/* This function cleans up TX buffer rings. If any of the buffer list has valid
+ * SKB address, associated SKB is freed.
+ */
+static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       struct sk_buff *skb;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
+       int i;
+
+       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+               if (reg->pfu_enabled) {
+                       desc2 = card->txbd_ring[i];
+                       if (card->tx_buf_list[i]) {
+                               skb = card->tx_buf_list[i];
+                               pci_unmap_single(card->dev, desc2->paddr,
+                                                skb->len, PCI_DMA_TODEVICE);
+                               dev_kfree_skb_any(skb);
+                       }
+                       memset(desc2, 0, sizeof(*desc2));
+               } else {
+                       desc = card->txbd_ring[i];
+                       if (card->tx_buf_list[i]) {
+                               skb = card->tx_buf_list[i];
+                               pci_unmap_single(card->dev, desc->paddr,
+                                                skb->len, PCI_DMA_TODEVICE);
+                               dev_kfree_skb_any(skb);
+                       }
+                       memset(desc, 0, sizeof(*desc));
+               }
+               card->tx_buf_list[i] = NULL;
+       }
+
+       return;
+}
+
+/* This function cleans up RX buffer rings. If any of the buffer list has valid
+ * SKB address, associated SKB is freed.
+ */
+static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
+       struct sk_buff *skb;
+       int i;
+
+       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+               if (reg->pfu_enabled) {
+                       desc2 = card->rxbd_ring[i];
+                       if (card->rx_buf_list[i]) {
+                               skb = card->rx_buf_list[i];
+                               pci_unmap_single(card->dev, desc2->paddr,
+                                                skb->len, PCI_DMA_TODEVICE);
+                               dev_kfree_skb_any(skb);
+                       }
+                       memset(desc2, 0, sizeof(*desc2));
+               } else {
+                       desc = card->rxbd_ring[i];
+                       if (card->rx_buf_list[i]) {
+                               skb = card->rx_buf_list[i];
+                               pci_unmap_single(card->dev, desc->paddr,
+                                                skb->len, PCI_DMA_TODEVICE);
+                               dev_kfree_skb_any(skb);
+                       }
+                       memset(desc, 0, sizeof(*desc));
+               }
+               card->rx_buf_list[i] = NULL;
+       }
+
+       return;
+}
+
+/* This function cleans up event buffer rings. If any of the buffer list has
+ * valid SKB address, associated SKB is freed.
+ */
+static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       struct mwifiex_evt_buf_desc *desc;
+       struct sk_buff *skb;
+       int i;
+
+       for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+               desc = card->evtbd_ring[i];
+               if (card->evt_buf_list[i]) {
+                       skb = card->evt_buf_list[i];
+                       pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
+                                        PCI_DMA_FROMDEVICE);
+                       dev_kfree_skb_any(skb);
+               }
+               card->evt_buf_list[i] = NULL;
+               memset(desc, 0, sizeof(*desc));
+       }
+
+       return;
+}
+
+/* This function creates buffer descriptor ring for TX
+ */
+static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
        /*
         * driver maintaines the write pointer and firmware maintaines the read
         * pointer. The write pointer starts at 0 (zero) while the read pointer
         * starts at zero with rollover bit set
         */
        card->txbd_wrptr = 0;
-       card->txbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+
+       if (reg->pfu_enabled)
+               card->txbd_rdptr = 0;
+       else
+               card->txbd_rdptr |= reg->tx_rollover_ind;
 
        /* allocate shared memory for the BD ring and divide the same in to
           several descriptors */
-       card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
-                                                       MWIFIEX_MAX_TXRX_BD;
+       if (reg->pfu_enabled)
+               card->txbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
+                                      MWIFIEX_MAX_TXRX_BD;
+       else
+               card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+                                      MWIFIEX_MAX_TXRX_BD;
+
        dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
                card->txbd_ring_size);
        card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
@@ -399,40 +658,15 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
                card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
                (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
 
-       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
-               card->txbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
-                                    (card->txbd_ring_vbase +
-                                     (sizeof(struct mwifiex_pcie_buf_desc)
-                                      * i));
-
-               card->tx_buf_list[i] = NULL;
-               card->txbd_ring[i]->paddr = 0;
-               card->txbd_ring[i]->len = 0;
-               card->txbd_ring[i]->flags = 0;
-       }
-
-       return 0;
+       return mwifiex_init_txq_ring(adapter);
 }
 
 static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       struct sk_buff *skb;
-       int i;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
-               if (card->tx_buf_list[i]) {
-                       skb = card->tx_buf_list[i];
-                       pci_unmap_single(card->dev, card->txbd_ring[i]->paddr,
-                                        skb->len, PCI_DMA_TODEVICE);
-                       dev_kfree_skb_any(skb);
-               }
-               card->tx_buf_list[i] = NULL;
-               card->txbd_ring[i]->paddr = 0;
-               card->txbd_ring[i]->len = 0;
-               card->txbd_ring[i]->flags = 0;
-               card->txbd_ring[i] = NULL;
-       }
+       mwifiex_cleanup_txq_ring(adapter);
 
        if (card->txbd_ring_vbase)
                pci_free_consistent(card->dev, card->txbd_ring_size,
@@ -440,7 +674,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
                                    card->txbd_ring_pbase);
        card->txbd_ring_size = 0;
        card->txbd_wrptr = 0;
-       card->txbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+       card->txbd_rdptr = 0 | reg->tx_rollover_ind;
        card->txbd_ring_vbase = NULL;
        card->txbd_ring_pbase = 0;
 
@@ -453,9 +687,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       struct sk_buff *skb;
-       int i;
-       dma_addr_t buf_pa;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        /*
         * driver maintaines the read pointer and firmware maintaines the write
@@ -463,10 +695,15 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
         * starts at zero with rollover bit set
         */
        card->rxbd_wrptr = 0;
-       card->rxbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+       card->rxbd_rdptr = reg->rx_rollover_ind;
+
+       if (reg->pfu_enabled)
+               card->rxbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
+                                      MWIFIEX_MAX_TXRX_BD;
+       else
+               card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+                                      MWIFIEX_MAX_TXRX_BD;
 
-       card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
-                                                       MWIFIEX_MAX_TXRX_BD;
        dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
                card->rxbd_ring_size);
        card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
@@ -485,39 +722,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
                (u32)((u64)card->rxbd_ring_pbase >> 32),
                card->rxbd_ring_size);
 
-       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
-               card->rxbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
-                                    (card->rxbd_ring_vbase +
-                                     (sizeof(struct mwifiex_pcie_buf_desc)
-                                      * i));
-
-               /* Allocate skb here so that firmware can DMA data from it */
-               skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
-               if (!skb) {
-                       dev_err(adapter->dev,
-                               "Unable to allocate skb for RX ring.\n");
-                       kfree(card->rxbd_ring_vbase);
-                       return -ENOMEM;
-               }
-               if (mwifiex_map_pci_memory(adapter, skb,
-                                          MWIFIEX_RX_DATA_BUF_SIZE,
-                                          PCI_DMA_FROMDEVICE))
-                       return -1;
-
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
-
-               dev_dbg(adapter->dev, "info: RX ring: add new skb base: %p, "
-                       "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
-                       skb, skb->data, (u32)buf_pa, (u32)((u64)buf_pa >> 32),
-                       skb->len);
-
-               card->rx_buf_list[i] = skb;
-               card->rxbd_ring[i]->paddr = buf_pa;
-               card->rxbd_ring[i]->len = (u16)skb->len;
-               card->rxbd_ring[i]->flags = 0;
-       }
-
-       return 0;
+       return mwifiex_init_rxq_ring(adapter);
 }
 
 /*
@@ -526,23 +731,9 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       struct sk_buff *skb;
-       int i;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
-               if (card->rx_buf_list[i]) {
-                       skb = card->rx_buf_list[i];
-                       pci_unmap_single(card->dev, card->rxbd_ring[i]->paddr ,
-                                        MWIFIEX_RX_DATA_BUF_SIZE,
-                                        PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(skb);
-               }
-               card->rx_buf_list[i] = NULL;
-               card->rxbd_ring[i]->paddr = 0;
-               card->rxbd_ring[i]->len = 0;
-               card->rxbd_ring[i]->flags = 0;
-               card->rxbd_ring[i] = NULL;
-       }
+       mwifiex_cleanup_rxq_ring(adapter);
 
        if (card->rxbd_ring_vbase)
                pci_free_consistent(card->dev, card->rxbd_ring_size,
@@ -550,7 +741,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
                                    card->rxbd_ring_pbase);
        card->rxbd_ring_size = 0;
        card->rxbd_wrptr = 0;
-       card->rxbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+       card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
        card->rxbd_ring_vbase = NULL;
        card->rxbd_ring_pbase = 0;
 
@@ -563,9 +754,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       struct sk_buff *skb;
-       int i;
-       dma_addr_t buf_pa;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        /*
         * driver maintaines the read pointer and firmware maintaines the write
@@ -573,10 +762,11 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
         * starts at zero with rollover bit set
         */
        card->evtbd_wrptr = 0;
-       card->evtbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+       card->evtbd_rdptr = reg->evt_rollover_ind;
+
+       card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
+                               MWIFIEX_MAX_EVT_BD;
 
-       card->evtbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
-                                                       MWIFIEX_MAX_EVT_BD;
        dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
                card->evtbd_ring_size);
        card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
@@ -595,39 +785,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
                (u32)((u64)card->evtbd_ring_pbase >> 32),
                card->evtbd_ring_size);
 
-       for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
-               card->evtbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
-                                     (card->evtbd_ring_vbase +
-                                      (sizeof(struct mwifiex_pcie_buf_desc)
-                                       * i));
-
-               /* Allocate skb here so that firmware can DMA data from it */
-               skb = dev_alloc_skb(MAX_EVENT_SIZE);
-               if (!skb) {
-                       dev_err(adapter->dev,
-                               "Unable to allocate skb for EVENT buf.\n");
-                       kfree(card->evtbd_ring_vbase);
-                       return -ENOMEM;
-               }
-               skb_put(skb, MAX_EVENT_SIZE);
-
-               if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
-                                          PCI_DMA_FROMDEVICE))
-                       return -1;
-
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
-               dev_dbg(adapter->dev, "info: Evt ring: add new skb. base: %p, "
-                       "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
-                       skb, skb->data, (u32)buf_pa, (u32)((u64)buf_pa >> 32),
-                       skb->len);
-
-               card->evt_buf_list[i] = skb;
-               card->evtbd_ring[i]->paddr = buf_pa;
-               card->evtbd_ring[i]->len = (u16)skb->len;
-               card->evtbd_ring[i]->flags = 0;
-       }
-
-       return 0;
+       return mwifiex_pcie_init_evt_ring(adapter);
 }
 
 /*
@@ -636,29 +794,16 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       struct sk_buff *skb;
-       int i;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
-               if (card->evt_buf_list[i]) {
-                       skb = card->evt_buf_list[i];
-                       pci_unmap_single(card->dev, card->evtbd_ring[i]->paddr,
-                                        MAX_EVENT_SIZE, PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(skb);
-               }
-               card->evt_buf_list[i] = NULL;
-               card->evtbd_ring[i]->paddr = 0;
-               card->evtbd_ring[i]->len = 0;
-               card->evtbd_ring[i]->flags = 0;
-               card->evtbd_ring[i] = NULL;
-       }
+       mwifiex_cleanup_evt_ring(adapter);
 
        if (card->evtbd_ring_vbase)
                pci_free_consistent(card->dev, card->evtbd_ring_size,
                                    card->evtbd_ring_vbase,
                                    card->evtbd_ring_pbase);
        card->evtbd_wrptr = 0;
-       card->evtbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+       card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
        card->evtbd_ring_size = 0;
        card->evtbd_ring_vbase = NULL;
        card->evtbd_ring_pbase = 0;
@@ -771,12 +916,13 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
 static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        u32 rdptr;
 
        /* Read the TX ring read pointer set by firmware */
-       if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
+       if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
                dev_err(adapter->dev,
-                       "Flush TXBD: failed to read REG_TXBD_RDPTR\n");
+                       "Flush TXBD: failed to read reg->tx_rdptr\n");
                return -1;
        }
 
@@ -800,31 +946,35 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
  */
 static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
 {
-       const u32 num_tx_buffs = MWIFIEX_MAX_TXRX_BD;
        struct sk_buff *skb;
        dma_addr_t buf_pa;
-       u32 wrdoneidx, rdptr, unmap_count = 0;
+       u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
                mwifiex_pm_wakeup_card(adapter);
 
        /* Read the TX ring read pointer set by firmware */
-       if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
+       if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
                dev_err(adapter->dev,
-                       "SEND COMP: failed to read REG_TXBD_RDPTR\n");
+                       "SEND COMP: failed to read reg->tx_rdptr\n");
                return -1;
        }
 
        dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
                card->txbd_rdptr, rdptr);
 
+       num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
        /* free from previous txbd_rdptr to current txbd_rdptr */
-       while (((card->txbd_rdptr & MWIFIEX_TXBD_MASK) !=
-               (rdptr & MWIFIEX_TXBD_MASK)) ||
-              ((card->txbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
-               (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
-               wrdoneidx = card->txbd_rdptr & MWIFIEX_TXBD_MASK;
+       while (((card->txbd_rdptr & reg->tx_mask) !=
+               (rdptr & reg->tx_mask)) ||
+              ((card->txbd_rdptr & reg->tx_rollover_ind) !=
+               (rdptr & reg->tx_rollover_ind))) {
+               wrdoneidx = (card->txbd_rdptr & reg->tx_mask) >>
+                           reg->tx_start_ptr;
 
                skb = card->tx_buf_list[wrdoneidx];
                if (skb) {
@@ -845,25 +995,38 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
                }
 
                card->tx_buf_list[wrdoneidx] = NULL;
-               card->txbd_ring[wrdoneidx]->paddr = 0;
-               card->rxbd_ring[wrdoneidx]->len = 0;
-               card->rxbd_ring[wrdoneidx]->flags = 0;
-               card->txbd_rdptr++;
 
-               if ((card->txbd_rdptr & MWIFIEX_TXBD_MASK) == num_tx_buffs)
+               if (reg->pfu_enabled) {
+                       desc2 = (void *)card->txbd_ring[wrdoneidx];
+                       memset(desc2, 0, sizeof(*desc2));
+               } else {
+                       desc = card->txbd_ring[wrdoneidx];
+                       memset(desc, 0, sizeof(*desc));
+               }
+               switch (card->dev->device) {
+               case PCIE_DEVICE_ID_MARVELL_88W8766P:
+                       card->txbd_rdptr++;
+                       break;
+               case PCIE_DEVICE_ID_MARVELL_88W8897:
+                       card->txbd_rdptr += reg->ring_tx_start_ptr;
+                       break;
+               }
+
+
+               if ((card->txbd_rdptr & reg->tx_mask) == num_tx_buffs)
                        card->txbd_rdptr = ((card->txbd_rdptr &
-                                           MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
-                                           MWIFIEX_BD_FLAG_ROLLOVER_IND);
+                                            reg->tx_rollover_ind) ^
+                                            reg->tx_rollover_ind);
        }
 
        if (unmap_count)
                adapter->data_sent = false;
 
        if (card->txbd_flush) {
-               if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
-                    (card->txbd_rdptr & MWIFIEX_TXBD_MASK)) &&
-                   ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
-                    (card->txbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND)))
+               if (((card->txbd_wrptr & reg->tx_mask) ==
+                    (card->txbd_rdptr & reg->tx_mask)) &&
+                   ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+                    (card->txbd_rdptr & reg->tx_rollover_ind)))
                        card->txbd_flush = 0;
                else
                        mwifiex_clean_pcie_ring_buf(adapter);
@@ -883,9 +1046,12 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                       struct mwifiex_tx_param *tx_param)
 {
        struct pcie_service_card *card = adapter->card;
-       u32 wrindx;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       u32 wrindx, num_tx_buffs, rx_val;
        int ret;
        dma_addr_t buf_pa;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
        __le16 *tmp;
 
        if (!(skb->data && skb->len)) {
@@ -897,6 +1063,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
                mwifiex_pm_wakeup_card(adapter);
 
+       num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
        dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
                card->txbd_rdptr, card->txbd_wrptr);
        if (mwifiex_pcie_txbd_not_full(card)) {
@@ -913,25 +1080,46 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                                           PCI_DMA_TODEVICE))
                        return -1;
 
-               wrindx = card->txbd_wrptr & MWIFIEX_TXBD_MASK;
+               wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
                MWIFIEX_SKB_PACB(skb, &buf_pa);
                card->tx_buf_list[wrindx] = skb;
-               card->txbd_ring[wrindx]->paddr = buf_pa;
-               card->txbd_ring[wrindx]->len = (u16)skb->len;
-               card->txbd_ring[wrindx]->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
-                                               MWIFIEX_BD_FLAG_LAST_DESC;
 
-               if ((++card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
-                                                       MWIFIEX_MAX_TXRX_BD)
+               if (reg->pfu_enabled) {
+                       desc2 = (void *)card->txbd_ring[wrindx];
+                       desc2->paddr = buf_pa;
+                       desc2->len = (u16)skb->len;
+                       desc2->frag_len = (u16)skb->len;
+                       desc2->offset = 0;
+                       desc2->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+                                        MWIFIEX_BD_FLAG_LAST_DESC;
+               } else {
+                       desc = card->txbd_ring[wrindx];
+                       desc->paddr = buf_pa;
+                       desc->len = (u16)skb->len;
+                       desc->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+                                     MWIFIEX_BD_FLAG_LAST_DESC;
+               }
+
+               switch (card->dev->device) {
+               case PCIE_DEVICE_ID_MARVELL_88W8766P:
+                       card->txbd_wrptr++;
+                       break;
+               case PCIE_DEVICE_ID_MARVELL_88W8897:
+                       card->txbd_wrptr += reg->ring_tx_start_ptr;
+                       break;
+               }
+
+               if ((card->txbd_wrptr & reg->tx_mask) == num_tx_buffs)
                        card->txbd_wrptr = ((card->txbd_wrptr &
-                                               MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
-                                               MWIFIEX_BD_FLAG_ROLLOVER_IND);
+                                               reg->tx_rollover_ind) ^
+                                               reg->tx_rollover_ind);
 
-               /* Write the TX ring write pointer in to REG_TXBD_WRPTR */
-               if (mwifiex_write_reg(adapter, REG_TXBD_WRPTR,
-                                     card->txbd_wrptr)) {
+               rx_val = card->rxbd_rdptr & reg->rx_wrap_mask;
+               /* Write the TX ring write pointer in to reg->tx_wrptr */
+               if (mwifiex_write_reg(adapter, reg->tx_wrptr,
+                                     card->txbd_wrptr | rx_val)) {
                        dev_err(adapter->dev,
-                               "SEND DATA: failed to write REG_TXBD_WRPTR\n");
+                               "SEND DATA: failed to write reg->tx_wrptr\n");
                        ret = -1;
                        goto done_unmap;
                }
@@ -971,9 +1159,11 @@ done_unmap:
        MWIFIEX_SKB_PACB(skb, &buf_pa);
        pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
        card->tx_buf_list[wrindx] = NULL;
-       card->txbd_ring[wrindx]->paddr = 0;
-       card->txbd_ring[wrindx]->len = 0;
-       card->txbd_ring[wrindx]->flags = 0;
+       if (reg->pfu_enabled)
+               memset(desc2, 0, sizeof(*desc2));
+       else
+               memset(desc, 0, sizeof(*desc));
+
        return ret;
 }
 
@@ -984,32 +1174,35 @@ done_unmap:
 static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       u32 wrptr, rd_index;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       u32 wrptr, rd_index, tx_val;
        dma_addr_t buf_pa;
        int ret = 0;
        struct sk_buff *skb_tmp = NULL;
+       struct mwifiex_pcie_buf_desc *desc;
+       struct mwifiex_pfu_buf_desc *desc2;
 
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
                mwifiex_pm_wakeup_card(adapter);
 
        /* Read the RX ring Write pointer set by firmware */
-       if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+       if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
                dev_err(adapter->dev,
-                       "RECV DATA: failed to read REG_TXBD_RDPTR\n");
+                       "RECV DATA: failed to read reg->rx_wrptr\n");
                ret = -1;
                goto done;
        }
        card->rxbd_wrptr = wrptr;
 
-       while (((wrptr & MWIFIEX_RXBD_MASK) !=
-               (card->rxbd_rdptr & MWIFIEX_RXBD_MASK)) ||
-              ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
-               (card->rxbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+       while (((wrptr & reg->rx_mask) !=
+               (card->rxbd_rdptr & reg->rx_mask)) ||
+              ((wrptr & reg->rx_rollover_ind) ==
+               (card->rxbd_rdptr & reg->rx_rollover_ind))) {
                struct sk_buff *skb_data;
                u16 rx_len;
                __le16 pkt_len;
 
-               rd_index = card->rxbd_rdptr & MWIFIEX_RXBD_MASK;
+               rd_index = card->rxbd_rdptr & reg->rx_mask;
                skb_data = card->rx_buf_list[rd_index];
 
                MWIFIEX_SKB_PACB(skb_data, &buf_pa);
@@ -1047,32 +1240,44 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                        "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
                        skb_tmp, rd_index);
                card->rx_buf_list[rd_index] = skb_tmp;
-               card->rxbd_ring[rd_index]->paddr = buf_pa;
-               card->rxbd_ring[rd_index]->len = skb_tmp->len;
-               card->rxbd_ring[rd_index]->flags = 0;
 
-               if ((++card->rxbd_rdptr & MWIFIEX_RXBD_MASK) ==
+               if (reg->pfu_enabled) {
+                       desc2 = (void *)card->rxbd_ring[rd_index];
+                       desc2->paddr = buf_pa;
+                       desc2->len = skb_tmp->len;
+                       desc2->frag_len = skb_tmp->len;
+                       desc2->offset = 0;
+                       desc2->flags = reg->ring_flag_sop | reg->ring_flag_eop;
+               } else {
+                       desc = card->rxbd_ring[rd_index];
+                       desc->paddr = buf_pa;
+                       desc->len = skb_tmp->len;
+                       desc->flags = 0;
+               }
+
+               if ((++card->rxbd_rdptr & reg->rx_mask) ==
                                                        MWIFIEX_MAX_TXRX_BD) {
                        card->rxbd_rdptr = ((card->rxbd_rdptr &
-                                            MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
-                                           MWIFIEX_BD_FLAG_ROLLOVER_IND);
+                                            reg->rx_rollover_ind) ^
+                                            reg->rx_rollover_ind);
                }
                dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
                        card->rxbd_rdptr, wrptr);
 
-               /* Write the RX ring read pointer in to REG_RXBD_RDPTR */
-               if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR,
-                                     card->rxbd_rdptr)) {
+               tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
+               /* Write the RX ring read pointer in to reg->rx_rdptr */
+               if (mwifiex_write_reg(adapter, reg->rx_rdptr,
+                                     card->rxbd_rdptr | tx_val)) {
                        dev_err(adapter->dev,
-                               "RECV DATA: failed to write REG_RXBD_RDPTR\n");
+                               "RECV DATA: failed to write reg->rx_rdptr\n");
                        ret = -1;
                        goto done;
                }
 
                /* Read the RX ring Write pointer set by firmware */
-               if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+               if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
                        dev_err(adapter->dev,
-                               "RECV DATA: failed to read REG_TXBD_RDPTR\n");
+                               "RECV DATA: failed to read reg->rx_wrptr\n");
                        ret = -1;
                        goto done;
                }
@@ -1093,6 +1298,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 {
        dma_addr_t buf_pa;
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (!(skb->data && skb->len)) {
                dev_err(adapter->dev,
@@ -1106,9 +1312,10 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 
        MWIFIEX_SKB_PACB(skb, &buf_pa);
 
-       /* Write the lower 32bits of the physical address to scratch
-        * register 0 */
-       if (mwifiex_write_reg(adapter, PCIE_SCRATCH_0_REG, (u32)buf_pa)) {
+       /* Write the lower 32bits of the physical address to low command
+        * address scratch register
+        */
+       if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
                dev_err(adapter->dev,
                        "%s: failed to write download command to boot code.\n",
                        __func__);
@@ -1117,9 +1324,10 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                return -1;
        }
 
-       /* Write the upper 32bits of the physical address to scratch
-        * register 1 */
-       if (mwifiex_write_reg(adapter, PCIE_SCRATCH_1_REG,
+       /* Write the upper 32bits of the physical address to high command
+        * address scratch register
+        */
+       if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
                              (u32)((u64)buf_pa >> 32))) {
                dev_err(adapter->dev,
                        "%s: failed to write download command to boot code.\n",
@@ -1129,10 +1337,10 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                return -1;
        }
 
-       /* Write the command length to scratch register 2 */
-       if (mwifiex_write_reg(adapter, PCIE_SCRATCH_2_REG, skb->len)) {
+       /* Write the command length to cmd_size scratch register */
+       if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
                dev_err(adapter->dev,
-                       "%s: failed to write command len to scratch reg 2\n",
+                       "%s: failed to write command len to cmd_size scratch reg\n",
                        __func__);
                pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
                                 PCI_DMA_TODEVICE);
@@ -1158,11 +1366,14 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask;
 
-       /* Write the RX ring read pointer in to REG_RXBD_RDPTR */
-       if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR, card->rxbd_rdptr | 0)) {
+       /* Write the RX ring read pointer in to reg->rx_rdptr */
+       if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
+                             tx_wrap)) {
                dev_err(adapter->dev,
-                       "RECV DATA: failed to write REG_RXBD_RDPTR\n");
+                       "RECV DATA: failed to write reg->rx_rdptr\n");
                return -1;
        }
        return 0;
@@ -1174,6 +1385,7 @@ static int
 mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        int ret = 0;
        dma_addr_t cmd_buf_pa, cmdrsp_buf_pa;
        u8 *payload = (u8 *)skb->data;
@@ -1206,7 +1418,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 
        /* To send a command, the driver will:
                1. Write the 64bit physical address of the data buffer to
-                  SCRATCH1 + SCRATCH0
+                  cmd response address low  + cmd response address high
                2. Ring the door bell (i.e. set the door bell interrupt)
 
                In response to door bell interrupt, the firmware will perform
@@ -1218,7 +1430,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
                /* Write the lower 32bits of the cmdrsp buffer physical
                   address */
-               if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO,
+               if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
                                      (u32)cmdrsp_buf_pa)) {
                        dev_err(adapter->dev,
                                "Failed to write download cmd to boot code.\n");
@@ -1227,7 +1439,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                }
                /* Write the upper 32bits of the cmdrsp buffer physical
                   address */
-               if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI,
+               if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
                                      (u32)((u64)cmdrsp_buf_pa >> 32))) {
                        dev_err(adapter->dev,
                                "Failed to write download cmd to boot code.\n");
@@ -1237,15 +1449,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        }
 
        MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
-       /* Write the lower 32bits of the physical address to REG_CMD_ADDR_LO */
-       if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO, (u32)cmd_buf_pa)) {
+       /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
+       if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
+                             (u32)cmd_buf_pa)) {
                dev_err(adapter->dev,
                        "Failed to write download cmd to boot code.\n");
                ret = -1;
                goto done;
        }
-       /* Write the upper 32bits of the physical address to REG_CMD_ADDR_HI */
-       if (mwifiex_write_reg(adapter, REG_CMD_ADDR_HI,
+       /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
+       if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
                              (u32)((u64)cmd_buf_pa >> 32))) {
                dev_err(adapter->dev,
                        "Failed to write download cmd to boot code.\n");
@@ -1253,10 +1466,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                goto done;
        }
 
-       /* Write the command length to REG_CMD_SIZE */
-       if (mwifiex_write_reg(adapter, REG_CMD_SIZE, card->cmd_buf->len)) {
+       /* Write the command length to reg->cmd_size */
+       if (mwifiex_write_reg(adapter, reg->cmd_size,
+                             card->cmd_buf->len)) {
                dev_err(adapter->dev,
-                       "Failed to write cmd len to REG_CMD_SIZE\n");
+                       "Failed to write cmd len to reg->cmd_size\n");
                ret = -1;
                goto done;
        }
@@ -1283,6 +1497,7 @@ done:
 static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        struct sk_buff *skb = card->cmdrsp_buf;
        int count = 0;
        u16 rx_len;
@@ -1304,8 +1519,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                        mwifiex_process_sleep_confirm_resp(adapter, skb->data,
                                                           skb->len);
-                       while (mwifiex_pcie_ok_to_access_hw(adapter) &&
-                                                       (count++ < 10))
+                       while (reg->sleep_cookie && (count++ < 10) &&
+                              mwifiex_pcie_ok_to_access_hw(adapter))
                                usleep_range(50, 60);
                } else {
                        dev_err(adapter->dev,
@@ -1328,14 +1543,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                /* Clear the cmd-rsp buffer address in scratch registers. This
                   will prevent firmware from writing to the same response
                   buffer again. */
-               if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, 0)) {
+               if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
                        dev_err(adapter->dev,
                                "cmd_done: failed to clear cmd_rsp_addr_lo\n");
                        return -1;
                }
                /* Write the upper 32bits of the cmdrsp buffer physical
                   address */
-               if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, 0)) {
+               if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
                        dev_err(adapter->dev,
                                "cmd_done: failed to clear cmd_rsp_addr_hi\n");
                        return -1;
@@ -1380,9 +1595,11 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
 static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
        u32 wrptr, event;
        dma_addr_t buf_pa;
+       struct mwifiex_evt_buf_desc *desc;
 
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
                mwifiex_pm_wakeup_card(adapter);
@@ -1399,9 +1616,9 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
        }
 
        /* Read the event ring write pointer set by firmware */
-       if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+       if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
                dev_err(adapter->dev,
-                       "EventReady: failed to read REG_EVTBD_WRPTR\n");
+                       "EventReady: failed to read reg->evt_wrptr\n");
                return -1;
        }
 
@@ -1409,8 +1626,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
                card->evtbd_rdptr, wrptr);
        if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
                                              & MWIFIEX_EVTBD_MASK)) ||
-           ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
-            (card->evtbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+           ((wrptr & reg->evt_rollover_ind) ==
+            (card->evtbd_rdptr & reg->evt_rollover_ind))) {
                struct sk_buff *skb_cmd;
                __le16 data_len = 0;
                u16 evt_len;
@@ -1424,9 +1641,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
                /* Take the pointer and set it to event pointer in adapter
                   and will return back after event handling callback */
                card->evt_buf_list[rdptr] = NULL;
-               card->evtbd_ring[rdptr]->paddr = 0;
-               card->evtbd_ring[rdptr]->len = 0;
-               card->evtbd_ring[rdptr]->flags = 0;
+               desc = card->evtbd_ring[rdptr];
+               memset(desc, 0, sizeof(*desc));
 
                event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN];
                adapter->event_cause = event;
@@ -1462,10 +1678,12 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
                                       struct sk_buff *skb)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        int ret = 0;
        u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
        u32 wrptr;
        dma_addr_t buf_pa;
+       struct mwifiex_evt_buf_desc *desc;
 
        if (!skb)
                return 0;
@@ -1477,9 +1695,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
        }
 
        /* Read the event ring write pointer set by firmware */
-       if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+       if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
                dev_err(adapter->dev,
-                       "event_complete: failed to read REG_EVTBD_WRPTR\n");
+                       "event_complete: failed to read reg->evt_wrptr\n");
                return -1;
        }
 
@@ -1492,9 +1710,10 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
                MWIFIEX_SKB_PACB(skb, &buf_pa);
                card->evt_buf_list[rdptr] = skb;
                MWIFIEX_SKB_PACB(skb, &buf_pa);
-               card->evtbd_ring[rdptr]->paddr = buf_pa;
-               card->evtbd_ring[rdptr]->len = (u16)skb->len;
-               card->evtbd_ring[rdptr]->flags = 0;
+               desc = card->evtbd_ring[rdptr];
+               desc->paddr = buf_pa;
+               desc->len = (u16)skb->len;
+               desc->flags = 0;
                skb = NULL;
        } else {
                dev_dbg(adapter->dev,
@@ -1504,17 +1723,18 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
 
        if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
                card->evtbd_rdptr = ((card->evtbd_rdptr &
-                                       MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
-                                       MWIFIEX_BD_FLAG_ROLLOVER_IND);
+                                       reg->evt_rollover_ind) ^
+                                       reg->evt_rollover_ind);
        }
 
        dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
                card->evtbd_rdptr, wrptr);
 
-       /* Write the event ring read pointer in to REG_EVTBD_RDPTR */
-       if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
+       /* Write the event ring read pointer in to reg->evt_rdptr */
+       if (mwifiex_write_reg(adapter, reg->evt_rdptr,
+                             card->evtbd_rdptr)) {
                dev_err(adapter->dev,
-                       "event_complete: failed to read REG_EVTBD_RDPTR\n");
+                       "event_complete: failed to read reg->evt_rdptr\n");
                return -1;
        }
 
@@ -1543,6 +1763,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        u32 block_retry_cnt = 0;
        dma_addr_t buf_pa;
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (!firmware || !firmware_len) {
                dev_err(adapter->dev,
@@ -1574,7 +1795,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        break;
 
                for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
-                       ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_2_REG,
+                       ret = mwifiex_read_reg(adapter, reg->cmd_size,
                                               &len);
                        if (ret) {
                                dev_warn(adapter->dev,
@@ -1620,16 +1841,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 
                        dev_dbg(adapter->dev, ".");
 
-                       tx_blocks = (txlen +
-                                    MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) /
-                                    MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD;
+                       tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
+                                   card->pcie.blksz_fw_dl;
 
                        /* Copy payload to buffer */
                        memmove(skb->data, &firmware[offset], txlen);
                }
 
                skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
-               skb_trim(skb, tx_blocks * MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD);
+               skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl);
 
                /* Send the boot command to device */
                if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
@@ -1682,6 +1902,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
 {
        int ret = 0;
        u32 firmware_stat, winner_status;
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        u32 tries;
 
        /* Mask spurios interrupts */
@@ -1692,7 +1914,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
        }
 
        dev_dbg(adapter->dev, "Setting driver ready signature\n");
-       if (mwifiex_write_reg(adapter, REG_DRV_READY, FIRMWARE_READY_PCIE)) {
+       if (mwifiex_write_reg(adapter, reg->drv_rdy,
+                             FIRMWARE_READY_PCIE)) {
                dev_err(adapter->dev,
                        "Failed to write driver ready signature\n");
                return -1;
@@ -1700,7 +1923,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
 
        /* Wait for firmware initialization event */
        for (tries = 0; tries < poll_num; tries++) {
-               if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+               if (mwifiex_read_reg(adapter, reg->fw_status,
                                     &firmware_stat))
                        ret = -1;
                else
@@ -1717,7 +1940,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
        }
 
        if (ret) {
-               if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+               if (mwifiex_read_reg(adapter, reg->fw_status,
                                     &winner_status))
                        ret = -1;
                else if (!winner_status) {
@@ -1955,6 +2178,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
        struct pcie_service_card *card = adapter->card;
        int ret;
        struct pci_dev *pdev = card->dev;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        pci_set_drvdata(pdev, card);
 
@@ -1985,6 +2209,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
        card->pci_mmap = pci_iomap(pdev, 0, 0);
        if (!card->pci_mmap) {
                dev_err(adapter->dev, "iomap(0) error\n");
+               ret = -EIO;
                goto err_iomap0;
        }
        ret = pci_request_region(pdev, 2, DRV_NAME);
@@ -1995,6 +2220,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
        card->pci_mmap1 = pci_iomap(pdev, 2, 0);
        if (!card->pci_mmap1) {
                dev_err(adapter->dev, "iomap(2) error\n");
+               ret = -EIO;
                goto err_iomap2;
        }
 
@@ -2015,10 +2241,13 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
        ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
        if (ret)
                goto err_alloc_cmdbuf;
-       ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
-       if (ret)
-               goto err_alloc_cookie;
-
+       if (reg->sleep_cookie) {
+               ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+               if (ret)
+                       goto err_alloc_cookie;
+       } else {
+               card->sleep_cookie_vbase = NULL;
+       }
        return ret;
 
 err_alloc_cookie:
@@ -2059,10 +2288,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
        struct pci_dev *pdev = card->dev;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (user_rmmod) {
                dev_dbg(adapter->dev, "Clearing driver ready signature\n");
-               if (mwifiex_write_reg(adapter, REG_DRV_READY, 0x00000000))
+               if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
                        dev_err(adapter->dev,
                                "Failed to write driver not-ready signature\n");
        }
@@ -2100,7 +2330,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        }
 
        adapter->dev = &pdev->dev;
-       strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+       strcpy(adapter->fw_name, card->pcie.firmware);
 
        return 0;
 }
@@ -2114,12 +2344,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg;
 
        if (card) {
                dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
                free_irq(card->dev->irq, card->dev);
 
-               mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+               reg = card->pcie.reg;
+               if (reg->sleep_cookie)
+                       mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
                mwifiex_pcie_delete_cmdrsp_buf(adapter);
                mwifiex_pcie_delete_evtbd_ring(adapter);
                mwifiex_pcie_delete_rxbd_ring(adapter);
@@ -2160,7 +2394,7 @@ static int mwifiex_pcie_init_module(void)
 {
        int ret;
 
-       pr_debug("Marvell 8766 PCIe Driver\n");
+       pr_debug("Marvell PCIe Driver\n");
 
        sema_init(&add_remove_card_sem, 1);
 
@@ -2203,4 +2437,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
 MODULE_VERSION(PCIE_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/pcie8766_uapsta.bin");
+MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
index 37eeb2c..d322ab8 100644 (file)
 #include    "main.h"
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
+#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+
+#define PCIE_VENDOR_ID_MARVELL              (0x11ab)
+#define PCIE_DEVICE_ID_MARVELL_88W8766P                (0x2b30)
+#define PCIE_DEVICE_ID_MARVELL_88W8897         (0x2b38)
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD                    0x20
@@ -57,6 +62,8 @@
 #define PCIE_SCRATCH_10_REG                            0xCE8
 #define PCIE_SCRATCH_11_REG                            0xCEC
 #define PCIE_SCRATCH_12_REG                            0xCF0
+#define PCIE_RD_DATA_PTR_Q0_Q1                          0xC08C
+#define PCIE_WR_DATA_PTR_Q0_Q1                          0xC05C
 
 #define CPU_INTR_DNLD_RDY                              BIT(0)
 #define CPU_INTR_DOOR_BELL                             BIT(1)
 #define MWIFIEX_BD_FLAG_ROLLOVER_IND                   BIT(7)
 #define MWIFIEX_BD_FLAG_FIRST_DESC                     BIT(0)
 #define MWIFIEX_BD_FLAG_LAST_DESC                      BIT(1)
-#define REG_CMD_ADDR_LO                                        PCIE_SCRATCH_0_REG
-#define REG_CMD_ADDR_HI                                        PCIE_SCRATCH_1_REG
-#define REG_CMD_SIZE                                   PCIE_SCRATCH_2_REG
-
-#define REG_CMDRSP_ADDR_LO                             PCIE_SCRATCH_4_REG
-#define REG_CMDRSP_ADDR_HI                             PCIE_SCRATCH_5_REG
-
-/* TX buffer description read pointer */
-#define REG_TXBD_RDPTR                                 PCIE_SCRATCH_6_REG
-/* TX buffer description write pointer */
-#define REG_TXBD_WRPTR                                 PCIE_SCRATCH_7_REG
-/* RX buffer description read pointer */
-#define REG_RXBD_RDPTR                                 PCIE_SCRATCH_8_REG
-/* RX buffer description write pointer */
-#define REG_RXBD_WRPTR                                 PCIE_SCRATCH_9_REG
-/* Event buffer description read pointer */
-#define REG_EVTBD_RDPTR                                        PCIE_SCRATCH_10_REG
-/* Event buffer description write pointer */
-#define REG_EVTBD_WRPTR                                        PCIE_SCRATCH_11_REG
-/* Driver ready signature write pointer */
-#define REG_DRV_READY                                  PCIE_SCRATCH_12_REG
+#define MWIFIEX_BD_FLAG_SOP                            BIT(0)
+#define MWIFIEX_BD_FLAG_EOP                            BIT(1)
+#define MWIFIEX_BD_FLAG_XS_SOP                         BIT(2)
+#define MWIFIEX_BD_FLAG_XS_EOP                         BIT(3)
+#define MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND               BIT(7)
+#define MWIFIEX_BD_FLAG_RX_ROLLOVER_IND                        BIT(10)
+#define MWIFIEX_BD_FLAG_TX_START_PTR                   BIT(16)
+#define MWIFIEX_BD_FLAG_TX_ROLLOVER_IND                        BIT(26)
 
 /* Max retry number of command write */
 #define MAX_WRITE_IOMEM_RETRY                          2
 /* FW awake cookie after FW ready */
 #define FW_AWAKE_COOKIE                                                (0xAA55AA55)
 
+struct mwifiex_pcie_card_reg {
+       u16 cmd_addr_lo;
+       u16 cmd_addr_hi;
+       u16 fw_status;
+       u16 cmd_size;
+       u16 cmdrsp_addr_lo;
+       u16 cmdrsp_addr_hi;
+       u16 tx_rdptr;
+       u16 tx_wrptr;
+       u16 rx_rdptr;
+       u16 rx_wrptr;
+       u16 evt_rdptr;
+       u16 evt_wrptr;
+       u16 drv_rdy;
+       u16 tx_start_ptr;
+       u32 tx_mask;
+       u32 tx_wrap_mask;
+       u32 rx_mask;
+       u32 rx_wrap_mask;
+       u32 tx_rollover_ind;
+       u32 rx_rollover_ind;
+       u32 evt_rollover_ind;
+       u8 ring_flag_sop;
+       u8 ring_flag_eop;
+       u8 ring_flag_xs_sop;
+       u8 ring_flag_xs_eop;
+       u32 ring_tx_start_ptr;
+       u8 pfu_enabled;
+       u8 sleep_cookie;
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
+       .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+       .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+       .cmd_size = PCIE_SCRATCH_2_REG,
+       .fw_status = PCIE_SCRATCH_3_REG,
+       .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+       .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+       .tx_rdptr = PCIE_SCRATCH_6_REG,
+       .tx_wrptr = PCIE_SCRATCH_7_REG,
+       .rx_rdptr = PCIE_SCRATCH_8_REG,
+       .rx_wrptr = PCIE_SCRATCH_9_REG,
+       .evt_rdptr = PCIE_SCRATCH_10_REG,
+       .evt_wrptr = PCIE_SCRATCH_11_REG,
+       .drv_rdy = PCIE_SCRATCH_12_REG,
+       .tx_start_ptr = 0,
+       .tx_mask = MWIFIEX_TXBD_MASK,
+       .tx_wrap_mask = 0,
+       .rx_mask = MWIFIEX_RXBD_MASK,
+       .rx_wrap_mask = 0,
+       .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+       .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+       .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+       .ring_flag_sop = 0,
+       .ring_flag_eop = 0,
+       .ring_flag_xs_sop = 0,
+       .ring_flag_xs_eop = 0,
+       .ring_tx_start_ptr = 0,
+       .pfu_enabled = 0,
+       .sleep_cookie = 1,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
+       .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+       .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+       .cmd_size = PCIE_SCRATCH_2_REG,
+       .fw_status = PCIE_SCRATCH_3_REG,
+       .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+       .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+       .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
+       .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
+       .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
+       .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
+       .evt_rdptr = PCIE_SCRATCH_10_REG,
+       .evt_wrptr = PCIE_SCRATCH_11_REG,
+       .drv_rdy = PCIE_SCRATCH_12_REG,
+       .tx_start_ptr = 16,
+       .tx_mask = 0x03FF0000,
+       .tx_wrap_mask = 0x07FF0000,
+       .rx_mask = 0x000003FF,
+       .rx_wrap_mask = 0x000007FF,
+       .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
+       .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
+       .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+       .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+       .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+       .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+       .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+       .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+       .pfu_enabled = 1,
+       .sleep_cookie = 0,
+};
+
+struct mwifiex_pcie_device {
+       const char *firmware;
+       const struct mwifiex_pcie_card_reg *reg;
+       u16 blksz_fw_dl;
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
+       .firmware       = PCIE8766_DEFAULT_FW_NAME,
+       .reg            = &mwifiex_reg_8766,
+       .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
+       .firmware       = PCIE8897_DEFAULT_FW_NAME,
+       .reg            = &mwifiex_reg_8897,
+       .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+};
+
+struct mwifiex_evt_buf_desc {
+       u64 paddr;
+       u16 len;
+       u16 flags;
+} __packed;
+
 struct mwifiex_pcie_buf_desc {
        u64 paddr;
        u16 len;
        u16 flags;
 } __packed;
 
+struct mwifiex_pfu_buf_desc {
+       u16 flags;
+       u16 offset;
+       u16 frag_len;
+       u16 len;
+       u64 paddr;
+       u32 reserved;
+} __packed;
+
 struct pcie_service_card {
        struct pci_dev *dev;
        struct mwifiex_adapter *adapter;
+       struct mwifiex_pcie_device pcie;
 
        u8 txbd_flush;
        u32 txbd_wrptr;
@@ -120,7 +241,7 @@ struct pcie_service_card {
        u32 txbd_ring_size;
        u8 *txbd_ring_vbase;
        dma_addr_t txbd_ring_pbase;
-       struct mwifiex_pcie_buf_desc *txbd_ring[MWIFIEX_MAX_TXRX_BD];
+       void *txbd_ring[MWIFIEX_MAX_TXRX_BD];
        struct sk_buff *tx_buf_list[MWIFIEX_MAX_TXRX_BD];
 
        u32 rxbd_wrptr;
@@ -128,7 +249,7 @@ struct pcie_service_card {
        u32 rxbd_ring_size;
        u8 *rxbd_ring_vbase;
        dma_addr_t rxbd_ring_pbase;
-       struct mwifiex_pcie_buf_desc *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
+       void *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
        struct sk_buff *rx_buf_list[MWIFIEX_MAX_TXRX_BD];
 
        u32 evtbd_wrptr;
@@ -136,7 +257,7 @@ struct pcie_service_card {
        u32 evtbd_ring_size;
        u8 *evtbd_ring_vbase;
        dma_addr_t evtbd_ring_pbase;
-       struct mwifiex_pcie_buf_desc *evtbd_ring[MWIFIEX_MAX_EVT_BD];
+       void *evtbd_ring[MWIFIEX_MAX_EVT_BD];
        struct sk_buff *evt_buf_list[MWIFIEX_MAX_EVT_BD];
 
        struct sk_buff *cmd_buf;
@@ -150,11 +271,24 @@ struct pcie_service_card {
 static inline int
 mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr)
 {
-       if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
-                       (rdptr & MWIFIEX_TXBD_MASK)) &&
-           ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
-                       (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND)))
-               return 1;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       switch (card->dev->device) {
+       case PCIE_DEVICE_ID_MARVELL_88W8766P:
+               if (((card->txbd_wrptr & reg->tx_mask) ==
+                    (rdptr & reg->tx_mask)) &&
+                   ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+                    (rdptr & reg->tx_rollover_ind)))
+                       return 1;
+               break;
+       case PCIE_DEVICE_ID_MARVELL_88W8897:
+               if (((card->txbd_wrptr & reg->tx_mask) ==
+                    (rdptr & reg->tx_mask)) &&
+                   ((card->txbd_wrptr & reg->tx_rollover_ind) ==
+                       (rdptr & reg->tx_rollover_ind)))
+                       return 1;
+               break;
+       }
 
        return 0;
 }
@@ -162,11 +296,24 @@ mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr)
 static inline int
 mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
 {
-       if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) !=
-            (card->txbd_rdptr & MWIFIEX_TXBD_MASK)) ||
-           ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
-            (card->txbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND)))
-               return 1;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       switch (card->dev->device) {
+       case PCIE_DEVICE_ID_MARVELL_88W8766P:
+               if (((card->txbd_wrptr & reg->tx_mask) !=
+                    (card->txbd_rdptr & reg->tx_mask)) ||
+                   ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+                    (card->txbd_rdptr & reg->tx_rollover_ind)))
+                       return 1;
+               break;
+       case PCIE_DEVICE_ID_MARVELL_88W8897:
+               if (((card->txbd_wrptr & reg->tx_mask) !=
+                    (card->txbd_rdptr & reg->tx_mask)) ||
+                   ((card->txbd_wrptr & reg->tx_rollover_ind) ==
+                    (card->txbd_rdptr & reg->tx_rollover_ind)))
+                       return 1;
+               break;
+       }
 
        return 0;
 }
index f0de401..e0cce1b 100644 (file)
@@ -1557,7 +1557,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
                        scan_rsp->number_of_sets);
                ret = -1;
-               goto done;
+               goto check_next_scan;
        }
 
        bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1628,7 +1628,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                if (!beacon_size || beacon_size > bytes_left) {
                        bss_info += bytes_left;
                        bytes_left = 0;
-                       return -1;
+                       ret = -1;
+                       goto check_next_scan;
                }
 
                /* Initialize the current working beacon pointer for this BSS
@@ -1684,7 +1685,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                dev_err(priv->adapter->dev,
                                        "%s: bytes left < IE length\n",
                                        __func__);
-                               goto done;
+                               goto check_next_scan;
                        }
                        if (element_id == WLAN_EID_DS_PARAMS) {
                                channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1740,13 +1741,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                            .mac_address, ETH_ALEN))
                                        mwifiex_update_curr_bss_params(priv,
                                                                       bss);
-                               cfg80211_put_bss(bss);
+                               cfg80211_put_bss(priv->wdev->wiphy, bss);
                        }
                } else {
                        dev_dbg(adapter->dev, "missing BSS channel IE\n");
                }
        }
 
+check_next_scan:
        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
        if (list_empty(&adapter->scan_pending_q)) {
                spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1807,7 +1809,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
-done:
        return ret;
 }
 
index 31d7b2b..d3fb9a1 100644 (file)
@@ -332,7 +332,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
                        u8 *buffer, u32 pkt_len, u32 port)
 {
        struct sdio_mmc_card *card = adapter->card;
-       int ret = -1;
+       int ret;
        u8 blk_mode =
                (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
        u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
@@ -350,8 +350,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
 
        sdio_claim_host(card->func);
 
-       if (!sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size))
-               ret = 0;
+       ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
 
        sdio_release_host(card->func);
 
@@ -365,7 +364,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
                                  u32 len, u32 port, u8 claim)
 {
        struct sdio_mmc_card *card = adapter->card;
-       int ret = -1;
+       int ret;
        u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
                       : BLOCK_MODE;
        u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
@@ -376,8 +375,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
        if (claim)
                sdio_claim_host(card->func);
 
-       if (!sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size))
-               ret = 0;
+       ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
 
        if (claim)
                sdio_release_host(card->func);
index 65c12eb..8470564 100644 (file)
@@ -935,9 +935,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                                        / MWIFIEX_SDIO_BLOCK_SIZE)
                                       * MWIFIEX_SDIO_BLOCK_SIZE;
                adapter->curr_tx_buf_size = adapter->tx_buf_size;
-               dev_dbg(adapter->dev,
-                       "cmd: max_tx_buf_size=%d, tx_buf_size=%d\n",
-                       adapter->max_tx_buf_size, adapter->tx_buf_size);
+               dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
+                       adapter->curr_tx_buf_size);
 
                if (adapter->if_ops.update_mp_end_port)
                        adapter->if_ops.update_mp_end_port(adapter,
index b8fa76a..7eef745 100644 (file)
@@ -162,13 +162,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
 
        rcu_read_lock();
        ies = rcu_dereference(bss->ies);
-       if (WARN_ON(!ies)) {
-               /* should never happen */
-               rcu_read_unlock();
-               return -EINVAL;
-       }
        beacon_ie = kmemdup(ies->data, ies->len, GFP_ATOMIC);
        beacon_ie_len = ies->len;
+       bss_desc->timestamp = ies->tsf;
        rcu_read_unlock();
 
        if (!beacon_ie) {
@@ -184,7 +180,6 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        bss_desc->cap_info_bitmap = bss->capability;
        bss_desc->bss_band = bss_priv->band;
        bss_desc->fw_tsf = bss_priv->fw_tsf;
-       bss_desc->timestamp = bss->tsf;
        if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
                dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
                bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
@@ -322,7 +317,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                }
 
                if (bss)
-                       cfg80211_put_bss(bss);
+                       cfg80211_put_bss(priv->adapter->wiphy, bss);
        } else {
                /* Adhoc mode */
                /* If the requested SSID matches current SSID, return */
@@ -352,7 +347,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                                                        " list. Joining...\n");
                        ret = mwifiex_adhoc_join(priv, bss_desc);
                        if (bss)
-                               cfg80211_put_bss(bss);
+                               cfg80211_put_bss(priv->adapter->wiphy, bss);
                } else {
                        dev_dbg(adapter->dev, "info: Network not found in "
                                "the list, creating adhoc with ssid = %s\n",
index 5d4a10a..f90fe21 100644 (file)
@@ -672,7 +672,7 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
                           *len, &actual_length, timeout);
        if (ret) {
                dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
-               ret = -1;
+               return ret;
        }
 
        *len = actual_length;
@@ -691,7 +691,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
                           *len, &actual_length, timeout);
        if (ret) {
                dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
-               ret = -1;
+               return ret;
        }
 
        *len = actual_length;
index 0982375..2155397 100644 (file)
@@ -91,7 +91,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
                memcpy(info->packets_out,
                       priv->wmm.packets_out,
                       sizeof(priv->wmm.packets_out));
-               info->max_tx_buf_size = (u32) adapter->max_tx_buf_size;
+               info->curr_tx_buf_size = (u32) adapter->curr_tx_buf_size;
                info->tx_buf_size = (u32) adapter->tx_buf_size;
                info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(priv,
                                                              info->rx_tbl);
index 224cf91..091d9a6 100644 (file)
@@ -285,6 +285,9 @@ struct mwl8k_priv {
        char *fw_pref;
        char *fw_alt;
        struct completion firmware_loading_complete;
+
+       /* bitmap of running BSSes */
+       u32 running_bsses;
 };
 
 #define MAX_WEP_KEY_LEN         13
@@ -331,20 +334,20 @@ struct mwl8k_sta {
 #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
 
 static const struct ieee80211_channel mwl8k_channels_24[] = {
-       { .center_freq = 2412, .hw_value = 1, },
-       { .center_freq = 2417, .hw_value = 2, },
-       { .center_freq = 2422, .hw_value = 3, },
-       { .center_freq = 2427, .hw_value = 4, },
-       { .center_freq = 2432, .hw_value = 5, },
-       { .center_freq = 2437, .hw_value = 6, },
-       { .center_freq = 2442, .hw_value = 7, },
-       { .center_freq = 2447, .hw_value = 8, },
-       { .center_freq = 2452, .hw_value = 9, },
-       { .center_freq = 2457, .hw_value = 10, },
-       { .center_freq = 2462, .hw_value = 11, },
-       { .center_freq = 2467, .hw_value = 12, },
-       { .center_freq = 2472, .hw_value = 13, },
-       { .center_freq = 2484, .hw_value = 14, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -365,10 +368,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
 };
 
 static const struct ieee80211_channel mwl8k_channels_50[] = {
-       { .center_freq = 5180, .hw_value = 36, },
-       { .center_freq = 5200, .hw_value = 40, },
-       { .center_freq = 5220, .hw_value = 44, },
-       { .center_freq = 5240, .hw_value = 48, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1146,7 +1149,6 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
 
        rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
        if (rxq->buf == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
                pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
                return -ENOMEM;
        }
@@ -1439,7 +1441,6 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
 
        txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
        if (txq->skb == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
                pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
                return -ENOMEM;
        }
@@ -2156,6 +2157,8 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
        }
 }
 
+static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable,
+                              u32 bitmap);
 
 /*
  * Command processing.
@@ -2174,6 +2177,34 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
        int rc;
        unsigned long timeout = 0;
        u8 buf[32];
+       u32 bitmap = 0;
+
+       wiphy_dbg(hw->wiphy, "Posting %s [%d]\n",
+                 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), cmd->macid);
+
+       /* Before posting firmware commands that could change the hardware
+        * characteristics, make sure that all BSSes are stopped temporary.
+        * Enable these stopped BSSes after completion of the commands
+        */
+
+       rc = mwl8k_fw_lock(hw);
+       if (rc)
+               return rc;
+
+       if (priv->ap_fw && priv->running_bsses) {
+               switch (le16_to_cpu(cmd->code)) {
+               case MWL8K_CMD_SET_RF_CHANNEL:
+               case MWL8K_CMD_RADIO_CONTROL:
+               case MWL8K_CMD_RF_TX_POWER:
+               case MWL8K_CMD_TX_POWER:
+               case MWL8K_CMD_RF_ANTENNA:
+               case MWL8K_CMD_RTS_THRESHOLD:
+               case MWL8K_CMD_MIMO_CONFIG:
+                       bitmap = priv->running_bsses;
+                       mwl8k_enable_bsses(hw, false, bitmap);
+                       break;
+               }
+       }
 
        cmd->result = (__force __le16) 0xffff;
        dma_size = le16_to_cpu(cmd->length);
@@ -2182,13 +2213,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
        if (pci_dma_mapping_error(priv->pdev, dma_addr))
                return -ENOMEM;
 
-       rc = mwl8k_fw_lock(hw);
-       if (rc) {
-               pci_unmap_single(priv->pdev, dma_addr, dma_size,
-                                               PCI_DMA_BIDIRECTIONAL);
-               return rc;
-       }
-
        priv->hostcmd_wait = &cmd_wait;
        iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
        iowrite32(MWL8K_H2A_INT_DOORBELL,
@@ -2201,7 +2225,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 
        priv->hostcmd_wait = NULL;
 
-       mwl8k_fw_unlock(hw);
 
        pci_unmap_single(priv->pdev, dma_addr, dma_size,
                                        PCI_DMA_BIDIRECTIONAL);
@@ -2228,6 +2251,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
                                     ms);
        }
 
+       if (bitmap)
+               mwl8k_enable_bsses(hw, true, bitmap);
+
+       mwl8k_fw_unlock(hw);
+
        return rc;
 }
 
@@ -2489,7 +2517,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
                priv->hw_rev = cmd->hw_rev;
                mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
                priv->ap_macids_supported = 0x000000ff;
-               priv->sta_macids_supported = 0x00000000;
+               priv->sta_macids_supported = 0x00000100;
                priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues);
                if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) {
                        wiphy_warn(hw->wiphy, "fw reported %d ampdu queues"
@@ -3508,7 +3536,10 @@ static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw,
        mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
        if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
                if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
-                       mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+                       if (priv->ap_fw)
+                               mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+                       else
+                               mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
                else
                        mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
        } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
@@ -3680,8 +3711,16 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif, int enable)
 {
        struct mwl8k_cmd_bss_start *cmd;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+       struct mwl8k_priv *priv = hw->priv;
        int rc;
 
+       if (enable && (priv->running_bsses & (1 << mwl8k_vif->macid)))
+               return 0;
+
+       if (!enable && !(priv->running_bsses & (1 << mwl8k_vif->macid)))
+               return 0;
+
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (cmd == NULL)
                return -ENOMEM;
@@ -3693,9 +3732,31 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
        rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
        kfree(cmd);
 
+       if (!rc) {
+               if (enable)
+                       priv->running_bsses |= (1 << mwl8k_vif->macid);
+               else
+                       priv->running_bsses &= ~(1 << mwl8k_vif->macid);
+       }
        return rc;
 }
 
+static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable, u32 bitmap)
+{
+       struct mwl8k_priv *priv = hw->priv;
+       struct mwl8k_vif *mwl8k_vif, *tmp_vif;
+       struct ieee80211_vif *vif;
+
+       list_for_each_entry_safe(mwl8k_vif, tmp_vif, &priv->vif_list, list) {
+               vif = mwl8k_vif->vif;
+
+               if (!(bitmap & (1 << mwl8k_vif->macid)))
+                       continue;
+
+               if (vif->type == NL80211_IFTYPE_AP)
+                       mwl8k_cmd_bss_start(hw, vif, enable);
+       }
+}
 /*
  * CMD_BASTREAM.
  */
@@ -4202,8 +4263,9 @@ static int mwl8k_set_key(struct ieee80211_hw *hw,
        u8 encr_type;
        u8 *addr;
        struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+       struct mwl8k_priv *priv = hw->priv;
 
-       if (vif->type == NL80211_IFTYPE_STATION)
+       if (vif->type == NL80211_IFTYPE_STATION && !priv->ap_fw)
                return -EOPNOTSUPP;
 
        if (sta == NULL)
@@ -4609,12 +4671,18 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
                break;
        case NL80211_IFTYPE_STATION:
                if (priv->ap_fw && di->fw_image_sta) {
-                       /* we must load the sta fw to meet this request */
-                       if (!list_empty(&priv->vif_list))
-                               return -EBUSY;
-                       rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
-                       if (rc)
-                               return rc;
+                       if (!list_empty(&priv->vif_list)) {
+                               wiphy_warn(hw->wiphy, "AP interface is running.\n"
+                                          "Adding STA interface for WDS");
+                       } else {
+                               /* we must load the sta fw to
+                                * meet this request.
+                                */
+                               rc = mwl8k_reload_firmware(hw,
+                                                          di->fw_image_sta);
+                               if (rc)
+                                       return rc;
+                       }
                }
                macids_supported = priv->sta_macids_supported;
                break;
@@ -4638,7 +4706,7 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
        /* Set the mac address.  */
        mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
 
-       if (priv->ap_fw)
+       if (vif->type == NL80211_IFTYPE_AP)
                mwl8k_cmd_set_new_stn_add_self(hw, vif);
 
        priv->macids_used |= 1 << mwl8k_vif->macid;
@@ -4663,7 +4731,7 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
        struct mwl8k_priv *priv = hw->priv;
        struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
 
-       if (priv->ap_fw)
+       if (vif->type == NL80211_IFTYPE_AP)
                mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
 
        mwl8k_cmd_del_mac_addr(hw, vif, vif->addr);
@@ -4737,9 +4805,11 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
        if (rc)
                goto out;
 
-       rc = mwl8k_cmd_set_rf_channel(hw, conf);
-       if (rc)
-               goto out;
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               rc = mwl8k_cmd_set_rf_channel(hw, conf);
+               if (rc)
+                       goto out;
+       }
 
        if (conf->power_level > 18)
                conf->power_level = 18;
@@ -4752,12 +4822,6 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
                                goto out;
                }
 
-               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
-               if (rc)
-                       wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
-               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
-               if (rc)
-                       wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
 
        } else {
                rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
@@ -4815,7 +4879,8 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                rcu_read_unlock();
        }
 
-       if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+       if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
+           !priv->ap_fw) {
                rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
                if (rc)
                        goto out;
@@ -4823,6 +4888,25 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                rc = mwl8k_cmd_use_fixed_rate_sta(hw);
                if (rc)
                        goto out;
+       } else {
+               if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
+                   priv->ap_fw) {
+                       int idx;
+                       int rate;
+
+                       /* Use AP firmware specific rate command.
+                        */
+                       idx = ffs(vif->bss_conf.basic_rates);
+                       if (idx)
+                               idx--;
+
+                       if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+                               rate = mwl8k_rates_24[idx].hw_value;
+                       else
+                               rate = mwl8k_rates_50[idx].hw_value;
+
+                       mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+               }
        }
 
        if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -4832,13 +4916,13 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        goto out;
        }
 
-       if (changed & BSS_CHANGED_ERP_SLOT) {
+       if ((changed & BSS_CHANGED_ERP_SLOT) && !priv->ap_fw)  {
                rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
                if (rc)
                        goto out;
        }
 
-       if (vif->bss_conf.assoc &&
+       if (vif->bss_conf.assoc && !priv->ap_fw &&
            (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
                        BSS_CHANGED_HT))) {
                rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
@@ -4918,11 +5002,9 @@ static void
 mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       struct ieee80211_bss_conf *info, u32 changed)
 {
-       struct mwl8k_priv *priv = hw->priv;
-
-       if (!priv->ap_fw)
+       if (vif->type == NL80211_IFTYPE_STATION)
                mwl8k_bss_info_changed_sta(hw, vif, info, changed);
-       else
+       if (vif->type == NL80211_IFTYPE_AP)
                mwl8k_bss_info_changed_ap(hw, vif, info, changed);
 }
 
@@ -5389,6 +5471,8 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
        { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
        { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
        { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+       { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, },
+       { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, },
        { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
        { },
 };
@@ -5647,6 +5731,15 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
                goto err_free_irq;
        }
 
+       /* Configure Antennas */
+       rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+       if (rc)
+               wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
+       rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+       if (rc)
+               wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
+
+
        /* Disable interrupts */
        iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
        free_irq(priv->pdev->irq, hw);
@@ -5734,6 +5827,7 @@ fail:
 
 static const struct ieee80211_iface_limit ap_if_limits[] = {
        { .max = 8,     .types = BIT(NL80211_IFTYPE_AP) },
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_STATION) },
 };
 
 static const struct ieee80211_iface_combination ap_if_comb = {
@@ -5826,6 +5920,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 
        if (priv->ap_macids_supported || priv->device_info->fw_image_ap) {
                hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+               hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
                hw->wiphy->iface_combinations = &ap_if_comb;
                hw->wiphy->n_iface_combinations = 1;
        }
@@ -5948,6 +6043,8 @@ static int mwl8k_probe(struct pci_dev *pdev,
 
        priv->hw_restart_in_progress = false;
 
+       priv->running_bsses = 0;
+
        return rc;
 
 err_stop_firmware:
index 96e39ed..e8c5714 100644 (file)
@@ -125,7 +125,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
        cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
                                   capability, beacon_interval, ie_buf, ie_len,
                                   signal, GFP_KERNEL);
-       cfg80211_put_bss(cbss);
+       cfg80211_put_bss(wiphy, cbss);
 }
 
 void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -158,7 +158,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
        cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
                                   capability, beacon_interval, ie, ie_len,
                                   signal, GFP_KERNEL);
-       cfg80211_put_bss(cbss);
+       cfg80211_put_bss(wiphy, cbss);
 }
 
 void orinoco_add_hostscan_results(struct orinoco_private *priv,
index 62ac607..b9deef6 100644 (file)
@@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = {
        {USB_DEVICE(0x06b9, 0x0121)},   /* Thomson SpeedTouch 121g */
        {USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
        {USB_DEVICE(0x0803, 0x4310)},   /* Zoom 4410a */
-       {USB_DEVICE(0x083a, 0x4503)},   /* T-Com Sinus 154 data II */
        {USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
+       {USB_DEVICE(0x083a, 0x4531)},   /* T-Com Sinus 154 data II */
        {USB_DEVICE(0x083a, 0xc501)},   /* Zoom Wireless-G 4410 */
        {USB_DEVICE(0x083a, 0xf503)},   /* Accton FD7050E ver 1010ec  */
        {USB_DEVICE(0x0846, 0x4240)},   /* Netgear WG111 (v2) */
index 9bb3f22..525fd75 100644 (file)
@@ -2026,7 +2026,7 @@ static bool rndis_bss_info_update(struct usbnet *usbdev,
        bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
                timestamp, capability, beacon_interval, ie, ie_len, signal,
                GFP_KERNEL);
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(priv->wdev.wiphy, bss);
 
        return (bss != NULL);
 }
@@ -2715,7 +2715,7 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
        bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
                timestamp, capability, beacon_period, ie_buf, ie_len,
                signal, GFP_KERNEL);
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(priv->wdev.wiphy, bss);
 }
 
 /*
index a2d2bc2..221beaa 100644 (file)
@@ -1185,8 +1185,14 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 
-       rt2x00queue_map_txskb(entry);
-
+       if (rt2x00queue_map_txskb(entry)) {
+               ERROR(rt2x00dev, "Fail to map beacon, aborting\n");
+               goto out;
+       }
+       /*
+        * Enable beaconing again.
+        */
+       rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
        /*
         * Write the TX descriptor for the beacon.
         */
@@ -1196,7 +1202,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
         * Dump beacon to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
        /*
         * Enable beaconing again.
         */
index 9bea10f..39edc59 100644 (file)
@@ -1338,7 +1338,10 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 
-       rt2x00queue_map_txskb(entry);
+       if (rt2x00queue_map_txskb(entry)) {
+               ERROR(rt2x00dev, "Fail to map beacon, aborting\n");
+               goto out;
+       }
 
        /*
         * Write the TX descriptor for the beacon.
@@ -1349,7 +1352,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
         * Dump beacon to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
        /*
         * Enable beaconing again.
         */
index a5c694f..a658b4b 100644 (file)
@@ -80,7 +80,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
            rt2x00_rf(rt2x00dev, RF3022))
                return true;
 
-       NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
+       WARNING(rt2x00dev, "Unknown RF chipset on rt305x\n");
        return false;
 }
 
index 0e8d170..48a01aa 100644 (file)
@@ -1152,6 +1152,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
        { PCI_DEVICE(0x1814, 0x3562) },
        { PCI_DEVICE(0x1814, 0x3592) },
        { PCI_DEVICE(0x1814, 0x3593) },
+       { PCI_DEVICE(0x1814, 0x359f) },
 #endif
 #ifdef CONFIG_RT2800PCI_RT53XX
        { PCI_DEVICE(0x1814, 0x5360) },
index 4721cad..098613e 100644 (file)
@@ -540,9 +540,9 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
        tx_pid  = rt2x00_get_field32(word, TXWI_W1_PACKETID);
 
        if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
-               WARNING(entry->queue->rt2x00dev,
-                       "TX status report missed for queue %d entry %d\n",
-                       entry->queue->qid, entry->entry_idx);
+               DEBUG(entry->queue->rt2x00dev,
+                     "TX status report missed for queue %d entry %d\n",
+                     entry->queue->qid, entry->entry_idx);
                return TXDONE_UNKNOWN;
        }
 
@@ -968,6 +968,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x07d1, 0x3c13) },
        { USB_DEVICE(0x07d1, 0x3c15) },
        { USB_DEVICE(0x07d1, 0x3c16) },
+       { USB_DEVICE(0x07d1, 0x3c17) },
        { USB_DEVICE(0x2001, 0x3c1b) },
        /* Draytek */
        { USB_DEVICE(0x07fa, 0x7712) },
@@ -1098,9 +1099,11 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x15a9, 0x0006) },
        /* Sweex */
        { USB_DEVICE(0x177f, 0x0153) },
+       { USB_DEVICE(0x177f, 0x0164) },
        { USB_DEVICE(0x177f, 0x0302) },
        { USB_DEVICE(0x177f, 0x0313) },
        { USB_DEVICE(0x177f, 0x0323) },
+       { USB_DEVICE(0x177f, 0x0324) },
        /* U-Media */
        { USB_DEVICE(0x157e, 0x300e) },
        { USB_DEVICE(0x157e, 0x3013) },
@@ -1115,6 +1118,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Zyxel */
        { USB_DEVICE(0x0586, 0x3416) },
        { USB_DEVICE(0x0586, 0x3418) },
+       { USB_DEVICE(0x0586, 0x341a) },
        { USB_DEVICE(0x0586, 0x341e) },
        { USB_DEVICE(0x0586, 0x343e) },
 #ifdef CONFIG_RT2800USB_RT33XX
@@ -1131,6 +1135,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x148f, 0x8070) },
        /* Sitecom */
        { USB_DEVICE(0x0df6, 0x0050) },
+       /* Sweex */
+       { USB_DEVICE(0x177f, 0x0163) },
+       { USB_DEVICE(0x177f, 0x0165) },
 #endif
 #ifdef CONFIG_RT2800USB_RT35XX
        /* Allwin */
@@ -1166,6 +1173,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
 #ifdef CONFIG_RT2800USB_RT53XX
        /* Arcadyan */
        { USB_DEVICE(0x043e, 0x7a12) },
+       { USB_DEVICE(0x043e, 0x7a32) },
        /* Azurewave */
        { USB_DEVICE(0x13d3, 0x3329) },
        { USB_DEVICE(0x13d3, 0x3365) },
@@ -1177,16 +1185,20 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x2001, 0x3c1e) },
        /* LG innotek */
        { USB_DEVICE(0x043e, 0x7a22) },
+       { USB_DEVICE(0x043e, 0x7a42) },
        /* Panasonic */
        { USB_DEVICE(0x04da, 0x1801) },
        { USB_DEVICE(0x04da, 0x1800) },
+       { USB_DEVICE(0x04da, 0x23f6) },
        /* Philips */
        { USB_DEVICE(0x0471, 0x2104) },
+       { USB_DEVICE(0x0471, 0x2126) },
+       { USB_DEVICE(0x0471, 0x2180) },
+       { USB_DEVICE(0x0471, 0x2181) },
+       { USB_DEVICE(0x0471, 0x2182) },
        /* Ralink */
        { USB_DEVICE(0x148f, 0x5370) },
        { USB_DEVICE(0x148f, 0x5372) },
-       /* Unknown */
-       { USB_DEVICE(0x04da, 0x23f6) },
 #endif
 #ifdef CONFIG_RT2800USB_UNKNOWN
        /*
@@ -1207,10 +1219,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0b05, 0x1760) },
        { USB_DEVICE(0x0b05, 0x1761) },
        { USB_DEVICE(0x0b05, 0x1790) },
+       { USB_DEVICE(0x0b05, 0x17a7) },
        /* AzureWave */
        { USB_DEVICE(0x13d3, 0x3262) },
        { USB_DEVICE(0x13d3, 0x3284) },
        { USB_DEVICE(0x13d3, 0x3322) },
+       { USB_DEVICE(0x13d3, 0x3340) },
+       { USB_DEVICE(0x13d3, 0x3399) },
+       { USB_DEVICE(0x13d3, 0x3400) },
+       { USB_DEVICE(0x13d3, 0x3401) },
        /* Belkin */
        { USB_DEVICE(0x050d, 0x1003) },
        /* Buffalo */
@@ -1223,13 +1240,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x18c5, 0x0008) },
        /* D-Link */
        { USB_DEVICE(0x07d1, 0x3c0b) },
-       { USB_DEVICE(0x07d1, 0x3c17) },
        /* Encore */
        { USB_DEVICE(0x203d, 0x14a1) },
+       /* EnGenius */
+       { USB_DEVICE(0x1740, 0x0600) },
+       { USB_DEVICE(0x1740, 0x0602) },
        /* Gemtek */
        { USB_DEVICE(0x15a9, 0x0010) },
        /* Gigabyte */
        { USB_DEVICE(0x1044, 0x800c) },
+       /* Hercules */
+       { USB_DEVICE(0x06f8, 0xe036) },
        /* Huawei */
        { USB_DEVICE(0x148f, 0xf101) },
        /* I-O DATA */
@@ -1256,13 +1277,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0df6, 0x004a) },
        { USB_DEVICE(0x0df6, 0x004d) },
        { USB_DEVICE(0x0df6, 0x0053) },
+       { USB_DEVICE(0x0df6, 0x0069) },
+       { USB_DEVICE(0x0df6, 0x006f) },
        /* SMC */
        { USB_DEVICE(0x083a, 0xa512) },
        { USB_DEVICE(0x083a, 0xc522) },
        { USB_DEVICE(0x083a, 0xd522) },
        { USB_DEVICE(0x083a, 0xf511) },
-       /* Zyxel */
-       { USB_DEVICE(0x0586, 0x341a) },
+       /* Sweex */
+       { USB_DEVICE(0x177f, 0x0254) },
+       /* TP-LINK */
+       { USB_DEVICE(0xf201, 0x5370) },
 #endif
        { 0, }
 };
index b52512b..086abb4 100644 (file)
 #define ERROR_PROBE(__msg, __args...) \
        DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args)
 #define WARNING(__dev, __msg, __args...) \
-       DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args)
-#define NOTICE(__dev, __msg, __args...) \
-       DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args)
+       DEBUG_PRINTK_MSG(__dev, KERN_WARNING, "Warning", __msg, ##__args)
 #define INFO(__dev, __msg, __args...) \
-       DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args)
+       DEBUG_PRINTK_MSG(__dev, KERN_INFO, "Info", __msg, ##__args)
 #define DEBUG(__dev, __msg, __args...) \
        DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args)
 #define EEPROM(__dev, __msg, __args...) \
@@ -1171,8 +1169,10 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
 /**
  * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
  * @entry: Pointer to &struct queue_entry
+ *
+ * Returns -ENOMEM if mapping fail, 0 otherwise.
  */
-void rt2x00queue_map_txskb(struct queue_entry *entry);
+int rt2x00queue_map_txskb(struct queue_entry *entry);
 
 /**
  * rt2x00queue_unmap_skb - Unmap a skb from DMA.
index b40a538..1031db6 100644 (file)
@@ -1236,7 +1236,8 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
         */
        if_limit = &rt2x00dev->if_limits_ap;
        if_limit->max = rt2x00dev->ops->max_ap_intf;
-       if_limit->types = BIT(NL80211_IFTYPE_AP);
+       if_limit->types = BIT(NL80211_IFTYPE_AP) |
+                       BIT(NL80211_IFTYPE_MESH_POINT);
 
        /*
         * Build up AP interface combinations structure.
@@ -1446,7 +1447,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
 #ifdef CONFIG_PM
 int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
 {
-       NOTICE(rt2x00dev, "Going to sleep.\n");
+       DEBUG(rt2x00dev, "Going to sleep.\n");
 
        /*
         * Prevent mac80211 from accessing driver while suspended.
@@ -1486,7 +1487,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
 
 int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
 {
-       NOTICE(rt2x00dev, "Waking up.\n");
+       DEBUG(rt2x00dev, "Waking up.\n");
 
        /*
         * Restore/enable extra components.
index ed7a1bb..20c6ecc 100644 (file)
@@ -731,9 +731,9 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
        queue->aifs = params->aifs;
        queue->txop = params->txop;
 
-       INFO(rt2x00dev,
-            "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
-            queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
+       DEBUG(rt2x00dev,
+             "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
+             queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
 
        return 0;
 }
index f35d85a..4d91795 100644 (file)
@@ -87,24 +87,35 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
        skbdesc->entry = entry;
 
        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
-               skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
-                                                 skb->data,
-                                                 skb->len,
-                                                 DMA_FROM_DEVICE);
+               dma_addr_t skb_dma;
+
+               skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
+                                        DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
+                       dev_kfree_skb_any(skb);
+                       return NULL;
+               }
+
+               skbdesc->skb_dma = skb_dma;
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }
 
        return skb;
 }
 
-void rt2x00queue_map_txskb(struct queue_entry *entry)
+int rt2x00queue_map_txskb(struct queue_entry *entry)
 {
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 
        skbdesc->skb_dma =
            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
+               return -ENOMEM;
+
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
 
@@ -343,10 +354,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                 * when using more then one tx stream (>MCS7).
                 */
                if (sta && txdesc->u.ht.mcs > 7 &&
-                   ((sta->ht_cap.cap &
-                     IEEE80211_HT_CAP_SM_PS) >>
-                    IEEE80211_HT_CAP_SM_PS_SHIFT) ==
-                   WLAN_HT_CAP_SM_PS_DYNAMIC)
+                   sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
                        __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
        } else {
                txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
@@ -545,8 +553,9 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
        /*
         * Map the skb to DMA.
         */
-       if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
-               rt2x00queue_map_txskb(entry);
+       if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+           rt2x00queue_map_txskb(entry))
+               return -ENOMEM;
 
        return 0;
 }
index b80bc46..b6aa0c4 100644 (file)
@@ -1,8 +1,26 @@
+config RTLWIFI
+       tristate "Realtek wireless card support"
+       depends on MAC80211
+       select FW_LOADER
+       ---help---
+         This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
+         drivers.  This module does nothing by itself - the various front-end
+         drivers need to be enabled to support any desired devices.
+
+         If you choose to build as a module, it'll be called rtlwifi.
+
+config RTLWIFI_DEBUG
+       bool "Debugging output for rtlwifi driver family"
+       depends on RTLWIFI
+       default y
+       ---help---
+       To use the module option that sets the dynamic-debugging level for,
+       the front-end driver, this parameter must be "Y". For memory-limited
+       systems, choose "N". If in doubt, choose "Y".
+
 config RTL8192CE
        tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
-       depends on MAC80211 && PCI
-       select FW_LOADER
-       select RTLWIFI
+       depends on RTLWIFI && PCI
        select RTL8192C_COMMON
        ---help---
        This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
@@ -12,9 +30,7 @@ config RTL8192CE
 
 config RTL8192SE
        tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
-       depends on MAC80211 && PCI
-       select FW_LOADER
-       select RTLWIFI
+       depends on RTLWIFI && PCI
        ---help---
        This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
        wireless network adapters.
@@ -23,9 +39,7 @@ config RTL8192SE
 
 config RTL8192DE
        tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
-       depends on MAC80211 && PCI
-       select FW_LOADER
-       select RTLWIFI
+       depends on RTLWIFI && PCI
        ---help---
        This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
        wireless network adapters.
@@ -34,9 +48,7 @@ config RTL8192DE
 
 config RTL8723AE
        tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
-       depends on MAC80211 && PCI && EXPERIMENTAL
-       select FW_LOADER
-       select RTLWIFI
+       depends on RTLWIFI && PCI
        ---help---
        This is the driver for Realtek RTL8723AE 802.11n PCIe
        wireless network adapters.
@@ -45,9 +57,7 @@ config RTL8723AE
 
 config RTL8192CU
        tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
-       depends on MAC80211 && USB
-       select FW_LOADER
-       select RTLWIFI
+       depends on RTLWIFI && USB
        select RTL8192C_COMMON
        ---help---
        This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -55,16 +65,6 @@ config RTL8192CU
 
        If you choose to build it as a module, it will be called rtl8192cu
 
-config RTLWIFI
-       tristate
-       depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
-       default m
-
-config RTLWIFI_DEBUG
-       bool "Additional debugging output"
-       depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
-       default y
-
 config RTL8192C_COMMON
        tristate
        depends on RTL8192CE || RTL8192CU
index 4494d13..99c5cea 100644 (file)
@@ -523,8 +523,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
        if (mac->opmode == NL80211_IFTYPE_STATION)
                bw_40 = mac->bw_40;
        else if (mac->opmode == NL80211_IFTYPE_AP ||
-               mac->opmode == NL80211_IFTYPE_ADHOC)
-               bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                mac->opmode == NL80211_IFTYPE_ADHOC)
+               bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
 
        if (bw_40 && sgi_40)
                tcb_desc->use_shortgi = true;
@@ -634,8 +634,7 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
                return;
        if (mac->opmode == NL80211_IFTYPE_AP ||
            mac->opmode == NL80211_IFTYPE_ADHOC) {
-               if (!(sta->ht_cap.ht_supported) ||
-                   !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+               if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
                        return;
        } else if (mac->opmode == NL80211_IFTYPE_STATION) {
                if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
@@ -1004,7 +1003,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                                         is_tx ? "Tx" : "Rx");
 
                                if (is_tx) {
-                                       rtl_lps_leave(hw);
+                                       schedule_work(&rtlpriv->
+                                                     works.lps_leave_work);
                                        ppsc->last_delaylps_stamp_jiffies =
                                            jiffies;
                                }
@@ -1014,7 +1014,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                }
        } else if (ETH_P_ARP == ether_type) {
                if (is_tx) {
-                       rtl_lps_leave(hw);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                        ppsc->last_delaylps_stamp_jiffies = jiffies;
                }
 
@@ -1024,7 +1024,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                         "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
 
                if (is_tx) {
-                       rtl_lps_leave(hw);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                        ppsc->last_delaylps_stamp_jiffies = jiffies;
                }
 
index c1e065f..f9f059d 100644 (file)
@@ -116,9 +116,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
                if (txrc->short_preamble)
                        rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
                if (mac->opmode == NL80211_IFTYPE_AP ||
-                       mac->opmode == NL80211_IFTYPE_ADHOC) {
-                       if (sta && (sta->ht_cap.cap &
-                           IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+                   mac->opmode == NL80211_IFTYPE_ADHOC) {
+                       if (sta && (sta->bandwidth >= IEEE80211_STA_RX_BW_40))
                                rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
                } else {
                        if (mac->bw_40)
@@ -223,13 +222,6 @@ static void rtl_rate_init(void *ppriv,
 {
 }
 
-static void rtl_rate_update(void *ppriv,
-                           struct ieee80211_supported_band *sband,
-                           struct ieee80211_sta *sta, void *priv_sta,
-                           u32 changed)
-{
-}
-
 static void *rtl_rate_alloc(struct ieee80211_hw *hw,
                struct dentry *debugfsdir)
 {
@@ -275,7 +267,6 @@ static struct rate_control_ops rtl_rate_ops = {
        .alloc_sta = rtl_rate_alloc_sta,
        .free_sta = rtl_rate_free_sta,
        .rate_init = rtl_rate_init,
-       .rate_update = rtl_rate_update,
        .tx_status = rtl_tx_status,
        .get_rate = rtl_get_rate,
 };
index 1cdf5a2..b793a65 100644 (file)
@@ -669,7 +669,8 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
        u8 thermalvalue, delta, delta_lck, delta_iqk;
        long ele_a, ele_d, temp_cck, val_x, value32;
        long val_y, ele_c = 0;
-       u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
+       u8 ofdm_index[2], ofdm_index_old[2], cck_index_old = 0;
+       s8 cck_index = 0;
        int i;
        bool is2t = IS_92C_SERIAL(rtlhal->version);
        s8 txpwr_level[2] = {0, 0};
index d1f34f6..1b65db7 100644 (file)
@@ -1846,9 +1846,9 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
        struct rtl_sta_info *sta_entry = NULL;
        u32 ratr_bitmap;
        u8 ratr_index;
-       u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                               ? 1 : 0;
-       u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+       u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+       u8 curshortgi_40mhz = curtxbw_40mhz &&
+                             (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
                                1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
                                1 : 0;
index c31795e..b9b1a6e 100644 (file)
@@ -488,7 +488,7 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        u8 *praddr;
        __le16 fc;
        u16 type, c_fc;
-       bool packet_matchbssid, packet_toself, packet_beacon;
+       bool packet_matchbssid, packet_toself, packet_beacon = false;
 
        tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
 
@@ -626,8 +626,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
        } else if (mac->opmode == NL80211_IFTYPE_AP ||
                mac->opmode == NL80211_IFTYPE_ADHOC) {
                if (sta)
-                       bw_40 = sta->ht_cap.cap &
-                               IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
        }
 
        seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
index 32ff959..85b6bdb 100644 (file)
@@ -1084,7 +1084,7 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        u8 *praddr;
        __le16 fc;
        u16 type, cpu_fc;
-       bool packet_matchbssid, packet_toself, packet_beacon;
+       bool packet_matchbssid, packet_toself, packet_beacon = false;
 
        tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
        hdr = (struct ieee80211_hdr *)tmp_buf;
index b7e6607..a73a17b 100644 (file)
@@ -76,7 +76,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
                                      GFP_KERNEL, hw, rtl_fw_cb);
 
 
-       return 0;
+       return err;
 }
 
 static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
@@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
        /* RTL8188CUS-VL */
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
        /* 8188 Combo for BC4 */
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
 
@@ -363,9 +364,15 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
 
 MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
 
+static int rtl8192cu_probe(struct usb_interface *intf,
+                          const struct usb_device_id *id)
+{
+       return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg);
+}
+
 static struct usb_driver rtl8192cu_driver = {
        .name = "rtl8192cu",
-       .probe = rtl_usb_probe,
+       .probe = rtl8192cu_probe,
        .disconnect = rtl_usb_disconnect,
        .id_table = rtl8192c_usb_ids,
 
index fd8df23..5251fb8 100644 (file)
@@ -841,9 +841,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
        long ele_a = 0, ele_d, temp_cck, val_x, value32;
        long val_y, ele_c = 0;
        u8 ofdm_index[2];
-       u8 cck_index = 0;
+       s8 cck_index = 0;
        u8 ofdm_index_old[2];
-       u8 cck_index_old = 0;
+       s8 cck_index_old = 0;
        u8 index;
        int i;
        bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
index f4051f4..aa5b425 100644 (file)
@@ -1970,8 +1970,7 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
        struct rtl_sta_info *sta_entry = NULL;
        u32 ratr_bitmap;
        u8 ratr_index;
-       u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                                                       ? 1 : 0;
+       u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
        u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
                                                        1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
index cdb570f..941080e 100644 (file)
@@ -574,8 +574,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
        } else if (mac->opmode == NL80211_IFTYPE_AP ||
                mac->opmode == NL80211_IFTYPE_ADHOC) {
                if (sta)
-                       bw_40 = sta->ht_cap.cap &
-                               IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
        }
        seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
        rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
index 28526a7..084e777 100644 (file)
@@ -2085,8 +2085,7 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
        struct rtl_sta_info *sta_entry = NULL;
        u32 ratr_bitmap;
        u8 ratr_index = 0;
-       u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                               ? 1 : 0;
+       u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
        u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
                                1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
index f8431a3..7b0a2e7 100644 (file)
@@ -621,8 +621,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
        } else if (mac->opmode == NL80211_IFTYPE_AP ||
                mac->opmode == NL80211_IFTYPE_ADHOC) {
                if (sta)
-                       bw_40 = sta->ht_cap.cap &
-                                   IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
        }
 
        seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
index f55b176..35cb8f8 100644 (file)
@@ -252,7 +252,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
        u16 box_reg = 0, box_extreg = 0;
        u8 u1tmp;
        bool isfw_rd = false;
-       bool bwrite_sucess = false;
+       bool bwrite_success = false;
        u8 wait_h2c_limmit = 100;
        u8 wait_writeh2c_limmit = 100;
        u8 boxcontent[4], boxextcontent[2];
@@ -291,7 +291,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
                }
        }
 
-       while (!bwrite_sucess) {
+       while (!bwrite_success) {
                wait_writeh2c_limmit--;
                if (wait_writeh2c_limmit == 0) {
                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -429,7 +429,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
                        break;
                }
 
-               bwrite_sucess = true;
+               bwrite_success = true;
 
                rtlhal->last_hmeboxnum = boxnum + 1;
                if (rtlhal->last_hmeboxnum == 4)
@@ -512,7 +512,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl8192_tx_ring *ring;
        struct rtl_tx_desc *pdesc;
-       u8 own;
        unsigned long flags;
        struct sk_buff *pskb = NULL;
 
@@ -525,7 +524,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
        pdesc = &ring->desc[0];
-       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
 
        rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
 
index 887d521..68c2834 100644 (file)
@@ -1433,7 +1433,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-       u8 bt_retry_cnt;
        u8 bt_info_original;
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
                 "[BTCoex] Get bt info by fw!!\n");
@@ -1445,7 +1444,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
                                 "[BTCoex] c2h for btInfo not rcvd yet!!\n");
        }
 
-       bt_retry_cnt = rtlhal->hal_coex_8723.bt_retry_cnt;
        bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original;
 
        /* when bt inquiry or page scan, we have to set h2c 0x25
index 0a8c038..9a0c71c 100644 (file)
@@ -703,11 +703,9 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 reg_bw_opmode;
-       u32 reg_ratr, reg_prsr;
+       u32 reg_prsr;
 
        reg_bw_opmode = BW_OPMODE_20MHZ;
-       reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
-           RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
        reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
 
        rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
@@ -1868,8 +1866,7 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw,
        struct rtl_sta_info *sta_entry = NULL;
        u32 ratr_bitmap;
        u8 ratr_index;
-       u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                               ? 1 : 0;
+       u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
        u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
                                1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
@@ -2030,7 +2027,7 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+       enum rf_pwrstate e_rfpowerstate_toset;
        u8 u1tmp;
        bool actuallyset = false;
 
@@ -2049,8 +2046,6 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
                spin_unlock(&rtlpriv->locks.rf_ps_lock);
        }
 
-       cur_rfstate = ppsc->rfpwr_state;
-
        rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
                       rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1)));
 
index 3d8536b..eafbb18 100644 (file)
@@ -614,17 +614,11 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        int i;
-       bool rtstatus = true;
        u32 *radioa_array_table;
-       u32 *radiob_array_table;
-       u16 radioa_arraylen, radiob_arraylen;
+       u16 radioa_arraylen;
 
        radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH;
        radioa_array_table = RTL8723E_RADIOA_1TARRAY;
-       radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
-       radiob_array_table = RTL8723E_RADIOB_1TARRAY;
-
-       rtstatus = true;
 
        switch (rfpath) {
        case RF90_PATH_A:
@@ -1531,11 +1525,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
                0x522, 0x550, 0x551, 0x040
        };
        const u32 retrycount = 2;
-       u32 bbvalue;
 
        if (t == 0) {
-               bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
-
                phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
                phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
        }
@@ -1712,8 +1703,7 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
        long result[4][8];
        u8 i, final_candidate;
        bool patha_ok, pathb_ok;
-       long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
-           reg_ecc, reg_tmp = 0;
+       long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0;
        bool is12simular, is13simular, is23simular;
        bool start_conttx = false, singletone = false;
        u32 iqk_bb_reg[10] = {
@@ -1780,21 +1770,15 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
                reg_e94 = result[i][0];
                reg_e9c = result[i][1];
                reg_ea4 = result[i][2];
-               reg_eac = result[i][3];
                reg_eb4 = result[i][4];
                reg_ebc = result[i][5];
-               reg_ec4 = result[i][6];
-               reg_ecc = result[i][7];
        }
        if (final_candidate != 0xff) {
                rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
                rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
                reg_ea4 = result[final_candidate][2];
-               reg_eac = result[final_candidate][3];
                rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
                rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
-               reg_ec4 = result[final_candidate][6];
-               reg_ecc = result[final_candidate][7];
                patha_ok = pathb_ok = true;
        } else {
                rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
index ce8ad12..ac08129 100644 (file)
@@ -244,7 +244,6 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        struct ieee80211_hdr *hdr;
        u8 *tmp_buf;
        u8 *praddr;
-       u8 *psaddr;
        __le16 fc;
        u16 type;
        bool packet_matchbssid, packet_toself, packet_beacon = false;
@@ -255,7 +254,6 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        fc = hdr->frame_control;
        type = WLAN_FC_GET_TYPE(fc);
        praddr = hdr->addr1;
-       psaddr = ieee80211_get_SA(hdr);
 
        packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
                            (!compare_ether_addr(mac->bssid,
@@ -397,8 +395,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
        } else if (mac->opmode == NL80211_IFTYPE_AP ||
                mac->opmode == NL80211_IFTYPE_ADHOC) {
                if (sta)
-                       bw_40 = sta->ht_cap.cap &
-                               IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
        }
 
        seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
index f2ecdeb..476eaef 100644 (file)
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        WARN_ON(skb_queue_empty(&rx_queue));
        while (!skb_queue_empty(&rx_queue)) {
                _skb = skb_dequeue(&rx_queue);
-               _rtl_usb_rx_process_agg(hw, skb);
-               ieee80211_rx_irqsafe(hw, skb);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
        }
 }
 
@@ -825,8 +825,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        u32 ep_num;
        struct urb *_urb = NULL;
        struct sk_buff *_skb = NULL;
-       struct sk_buff_head *skb_list;
-       struct usb_anchor *urb_list;
 
        WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
        if (unlikely(IS_USB_STOP(rtlusb))) {
@@ -836,7 +834,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
                return;
        }
        ep_num = rtlusb->ep_map.ep_mapping[qnum];
-       skb_list = &rtlusb->tx_skb_queue[ep_num];
        _skb = skb;
        _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
        if (unlikely(!_urb)) {
@@ -844,7 +841,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
                         "Can't allocate urb. Drop skb!\n");
                return;
        }
-       urb_list = &rtlusb->tx_pending[ep_num];
        _rtl_submit_tx_urb(hw, _urb);
 }
 
@@ -941,7 +937,8 @@ static struct rtl_intf_ops rtl_usb_ops = {
 };
 
 int rtl_usb_probe(struct usb_interface *intf,
-                       const struct usb_device_id *id)
+                 const struct usb_device_id *id,
+                 struct rtl_hal_cfg *rtl_hal_cfg)
 {
        int err;
        struct ieee80211_hw *hw = NULL;
@@ -976,7 +973,7 @@ int rtl_usb_probe(struct usb_interface *intf,
        usb_set_intfdata(intf, hw);
        /* init cfg & intf_ops */
        rtlpriv->rtlhal.interface = INTF_USB;
-       rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+       rtlpriv->cfg = rtl_hal_cfg;
        rtlpriv->intf_ops = &rtl_usb_ops;
        rtl_dbgp_flag_init(hw);
        /* Init IO handler */
index 5235136..fb986f9 100644 (file)
@@ -157,7 +157,8 @@ struct rtl_usb_priv {
 
 
 int rtl_usb_probe(struct usb_interface *intf,
-                           const struct usb_device_id *id);
+                 const struct usb_device_id *id,
+                 struct rtl_hal_cfg *rtl92cu_hal_cfg);
 void rtl_usb_disconnect(struct usb_interface *intf);
 int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
 int rtl_usb_resume(struct usb_interface *pusb_intf);
index 21a5f4f..f13258a 100644 (file)
@@ -1702,7 +1702,7 @@ struct rtl_works {
 
 struct rtl_debug {
        u32 dbgp_type[DBGP_TYPE_MAX];
-       u32 global_debuglevel;
+       int global_debuglevel;
        u64 global_debugcomponents;
 
        /* add for proc debug */
index be80011..cbe1e7f 100644 (file)
@@ -12,4 +12,13 @@ source "drivers/net/wireless/ti/wl18xx/Kconfig"
 
 # keep last for automatic dependencies
 source "drivers/net/wireless/ti/wlcore/Kconfig"
+
+config WILINK_PLATFORM_DATA
+       bool "TI WiLink platform data"
+       depends on WLCORE_SDIO || WL1251_SDIO
+       default y
+       ---help---
+       Small platform data bit needed to pass data to the sdio modules.
+
+
 endif # WL_TI
index 4d68239..af14231 100644 (file)
@@ -1,5 +1,7 @@
 obj-$(CONFIG_WLCORE)                   += wlcore/
 obj-$(CONFIG_WL12XX)                   += wl12xx/
-obj-$(CONFIG_WL12XX_PLATFORM_DATA)     += wlcore/
 obj-$(CONFIG_WL1251)                   += wl1251/
 obj-$(CONFIG_WL18XX)                   += wl18xx/
+
+# small builtin driver bit
+obj-$(CONFIG_WILINK_PLATFORM_DATA)     += wilink_platform_data.o
index 1fb6584..8fec4ed 100644 (file)
@@ -1,6 +1,6 @@
 menuconfig WL1251
        tristate "TI wl1251 driver support"
-       depends on MAC80211 && EXPERIMENTAL && GENERIC_HARDIRQS
+       depends on MAC80211 && GENERIC_HARDIRQS
        select FW_LOADER
        select CRC7
        ---help---
index 5ec50a4..74ae8e1 100644 (file)
@@ -29,6 +29,8 @@
 static int wl1251_event_scan_complete(struct wl1251 *wl,
                                      struct event_mailbox *mbox)
 {
+       int ret = 0;
+
        wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d",
                     mbox->scheduled_scan_status,
                     mbox->scheduled_scan_channels);
@@ -37,9 +39,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
                ieee80211_scan_completed(wl->hw, false);
                wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
                wl->scanning = false;
+               if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
+                       ret = wl1251_ps_set_mode(wl, STATION_IDLE);
        }
 
-       return 0;
+       return ret;
 }
 
 static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
index f47e8b0..bbbf68c 100644 (file)
@@ -623,7 +623,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+       if (changed & IEEE80211_CONF_CHANGE_IDLE && !wl->scanning) {
                if (conf->flags & IEEE80211_CONF_IDLE) {
                        ret = wl1251_ps_set_mode(wl, STATION_IDLE);
                        if (ret < 0)
@@ -895,11 +895,21 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
+       if (hw->conf.flags & IEEE80211_CONF_IDLE) {
+               ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+               if (ret < 0)
+                       goto out_sleep;
+               ret = wl1251_join(wl, wl->bss_type, wl->channel,
+                                 wl->beacon_int, wl->dtim_period);
+               if (ret < 0)
+                       goto out_sleep;
+       }
+
        skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
                                     req->ie_len);
        if (!skb) {
                ret = -ENOMEM;
-               goto out;
+               goto out_idle;
        }
        if (req->ie_len)
                memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
@@ -908,11 +918,11 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
                                      skb->len);
        dev_kfree_skb(skb);
        if (ret < 0)
-               goto out_sleep;
+               goto out_idle;
 
        ret = wl1251_cmd_trigger_scan_to(wl, 0);
        if (ret < 0)
-               goto out_sleep;
+               goto out_idle;
 
        wl->scanning = true;
 
@@ -920,9 +930,13 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
                              req->n_channels, WL1251_SCAN_NUM_PROBES);
        if (ret < 0) {
                wl->scanning = false;
-               goto out_sleep;
+               goto out_idle;
        }
+       goto out_sleep;
 
+out_idle:
+       if (hw->conf.flags & IEEE80211_CONF_IDLE)
+               ret = wl1251_ps_set_mode(wl, STATION_IDLE);
 out_sleep:
        wl1251_ps_elp_sleep(wl);
 
index da509aa..e6a2405 100644 (file)
@@ -1,3 +1,3 @@
-wl12xx-objs    = main.o cmd.o acx.o debugfs.o
+wl12xx-objs    = main.o cmd.o acx.o debugfs.o scan.o event.o
 
 obj-$(CONFIG_WL12XX)           += wl12xx.o
index 6222062..7dc9f96 100644 (file)
@@ -284,3 +284,40 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
        kfree(radio_parms);
        return ret;
 }
+
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+                             struct wl12xx_vif *wlvif,
+                             struct ieee80211_channel_switch *ch_switch)
+{
+       struct wl12xx_cmd_channel_switch *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "cmd channel switch");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->role_id = wlvif->role_id;
+       cmd->channel = ch_switch->channel->hw_value;
+       cmd->switch_time = ch_switch->count;
+       cmd->stop_tx = ch_switch->block_tx;
+
+       /* FIXME: control from mac80211 in the future */
+       /* Enable TX on the target channel */
+       cmd->post_switch_tx_disable = 0;
+
+       ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send channel switch command");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
index 140a0e8..32cbad5 100644 (file)
@@ -103,10 +103,30 @@ struct wl1271_ext_radio_parms_cmd {
        u8 padding[3];
 } __packed;
 
+struct wl12xx_cmd_channel_switch {
+       struct wl1271_cmd_header header;
+
+       u8 role_id;
+
+       /* The new serving channel */
+       u8 channel;
+       /* Relative time of the serving channel switch in TBTT units */
+       u8 switch_time;
+       /* Stop the role TX, should expect it after radar detection */
+       u8 stop_tx;
+       /* The target channel tx status 1-stopped 0-open*/
+       u8 post_switch_tx_disable;
+
+       u8 padding[3];
+} __packed;
+
 int wl1271_cmd_general_parms(struct wl1271 *wl);
 int wl128x_cmd_general_parms(struct wl1271 *wl);
 int wl1271_cmd_radio_parms(struct wl1271 *wl);
 int wl128x_cmd_radio_parms(struct wl1271 *wl);
 int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+                             struct wl12xx_vif *wlvif,
+                             struct ieee80211_channel_switch *ch_switch);
 
 #endif /* __WL12XX_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/event.c b/drivers/net/wireless/ti/wl12xx/event.c
new file mode 100644 (file)
index 0000000..6ac0ed7
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "event.h"
+#include "scan.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+                         bool *timeout)
+{
+       u32 local_event;
+
+       switch (event) {
+       case WLCORE_EVENT_ROLE_STOP_COMPLETE:
+               local_event = ROLE_STOP_COMPLETE_EVENT_ID;
+               break;
+
+       case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
+               local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
+               break;
+
+       default:
+               /* event not implemented */
+               return 0;
+       }
+       return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
+}
+
+int wl12xx_process_mailbox_events(struct wl1271 *wl)
+{
+       struct wl12xx_event_mailbox *mbox = wl->mbox;
+       u32 vector;
+
+
+       vector = le32_to_cpu(mbox->events_vector);
+       vector &= ~(le32_to_cpu(mbox->events_mask));
+
+       wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
+
+       if (vector & SCAN_COMPLETE_EVENT_ID) {
+               wl1271_debug(DEBUG_EVENT, "status: 0x%x",
+                            mbox->scheduled_scan_status);
+
+               if (wl->scan_wlvif)
+                       wl12xx_scan_completed(wl, wl->scan_wlvif);
+       }
+
+       if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
+               wl1271_debug(DEBUG_EVENT,
+                            "PERIODIC_SCAN_REPORT_EVENT (status 0x%0x)",
+                            mbox->scheduled_scan_status);
+
+               wlcore_scan_sched_scan_results(wl);
+       }
+
+       if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
+               wlcore_event_sched_scan_completed(wl,
+                                                 mbox->scheduled_scan_status);
+       if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
+               wlcore_event_soft_gemini_sense(wl,
+                                              mbox->soft_gemini_sense_info);
+
+       if (vector & BSS_LOSE_EVENT_ID)
+               wlcore_event_beacon_loss(wl, 0xff);
+
+       if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
+               wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
+
+       if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
+               wlcore_event_ba_rx_constraint(wl,
+                                             BIT(mbox->role_id),
+                                             mbox->rx_ba_allowed);
+
+       if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
+               wlcore_event_channel_switch(wl, 0xff,
+                                           mbox->channel_switch_status);
+
+       if (vector & DUMMY_PACKET_EVENT_ID)
+               wlcore_event_dummy_packet(wl);
+
+       /*
+        * "TX retries exceeded" has a different meaning according to mode.
+        * In AP mode the offending station is disconnected.
+        */
+       if (vector & MAX_TX_RETRY_EVENT_ID)
+               wlcore_event_max_tx_failure(wl,
+                               le16_to_cpu(mbox->sta_tx_retry_exceeded));
+
+       if (vector & INACTIVE_STA_EVENT_ID)
+               wlcore_event_inactive_sta(wl,
+                                         le16_to_cpu(mbox->sta_aging_status));
+
+       if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
+               wlcore_event_roc_complete(wl);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/ti/wl12xx/event.h b/drivers/net/wireless/ti/wl12xx/event.h
new file mode 100644 (file)
index 0000000..a5cc3fc
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_EVENT_H__
+#define __WL12XX_EVENT_H__
+
+#include "../wlcore/wlcore.h"
+
+enum {
+       MEASUREMENT_START_EVENT_ID               = BIT(8),
+       MEASUREMENT_COMPLETE_EVENT_ID            = BIT(9),
+       SCAN_COMPLETE_EVENT_ID                   = BIT(10),
+       WFD_DISCOVERY_COMPLETE_EVENT_ID          = BIT(11),
+       AP_DISCOVERY_COMPLETE_EVENT_ID           = BIT(12),
+       RESERVED1                                = BIT(13),
+       PSPOLL_DELIVERY_FAILURE_EVENT_ID         = BIT(14),
+       ROLE_STOP_COMPLETE_EVENT_ID              = BIT(15),
+       RADAR_DETECTED_EVENT_ID                  = BIT(16),
+       CHANNEL_SWITCH_COMPLETE_EVENT_ID         = BIT(17),
+       BSS_LOSE_EVENT_ID                        = BIT(18),
+       REGAINED_BSS_EVENT_ID                    = BIT(19),
+       MAX_TX_RETRY_EVENT_ID                    = BIT(20),
+       DUMMY_PACKET_EVENT_ID                    = BIT(21),
+       SOFT_GEMINI_SENSE_EVENT_ID               = BIT(22),
+       CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID        = BIT(23),
+       SOFT_GEMINI_AVALANCHE_EVENT_ID           = BIT(24),
+       PLT_RX_CALIBRATION_COMPLETE_EVENT_ID     = BIT(25),
+       INACTIVE_STA_EVENT_ID                    = BIT(26),
+       PEER_REMOVE_COMPLETE_EVENT_ID            = BIT(27),
+       PERIODIC_SCAN_COMPLETE_EVENT_ID          = BIT(28),
+       PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(29),
+       BA_SESSION_RX_CONSTRAINT_EVENT_ID        = BIT(30),
+       REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(31),
+};
+
+struct wl12xx_event_mailbox {
+       __le32 events_vector;
+       __le32 events_mask;
+       __le32 reserved_1;
+       __le32 reserved_2;
+
+       u8 number_of_scan_results;
+       u8 scan_tag;
+       u8 completed_scan_status;
+       u8 reserved_3;
+
+       u8 soft_gemini_sense_info;
+       u8 soft_gemini_protective_info;
+       s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
+       u8 change_auto_mode_timeout;
+       u8 scheduled_scan_status;
+       u8 reserved4;
+       /* tuned channel (roc) */
+       u8 roc_channel;
+
+       __le16 hlid_removed_bitmap;
+
+       /* bitmap of aged stations (by HLID) */
+       __le16 sta_aging_status;
+
+       /* bitmap of stations (by HLID) which exceeded max tx retries */
+       __le16 sta_tx_retry_exceeded;
+
+       /* discovery completed results */
+       u8 discovery_tag;
+       u8 number_of_preq_results;
+       u8 number_of_prsp_results;
+       u8 reserved_5;
+
+       /* rx ba constraint */
+       u8 role_id; /* 0xFF means any role. */
+       u8 rx_ba_allowed;
+       u8 reserved_6[2];
+
+       /* Channel switch results */
+
+       u8 channel_switch_role_id;
+       u8 channel_switch_status;
+       u8 reserved_7[2];
+
+       u8 ps_poll_delivery_failure_role_ids;
+       u8 stopped_role_ids;
+       u8 started_role_ids;
+
+       u8 reserved_8[9];
+} __packed;
+
+int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+                         bool *timeout);
+int wl12xx_process_mailbox_events(struct wl1271 *wl);
+
+#endif
+
index e5f5f8f..09694e3 100644 (file)
@@ -38,6 +38,8 @@
 #include "reg.h"
 #include "cmd.h"
 #include "acx.h"
+#include "scan.h"
+#include "event.h"
 #include "debugfs.h"
 
 static char *fref_param;
@@ -208,6 +210,8 @@ static struct wlcore_conf wl12xx_conf = {
                .tmpl_short_retry_limit      = 10,
                .tmpl_long_retry_limit       = 10,
                .tx_watchdog_timeout         = 5000,
+               .slow_link_thold             = 3,
+               .fast_link_thold             = 10,
        },
        .conn = {
                .wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
@@ -265,8 +269,10 @@ static struct wlcore_conf wl12xx_conf = {
        .scan = {
                .min_dwell_time_active        = 7500,
                .max_dwell_time_active        = 30000,
-               .min_dwell_time_passive       = 100000,
-               .max_dwell_time_passive       = 100000,
+               .min_dwell_time_active_long   = 25000,
+               .max_dwell_time_active_long   = 50000,
+               .dwell_time_passive           = 100000,
+               .dwell_time_dfs               = 150000,
                .num_probe_reqs               = 2,
                .split_scan_timeout           = 50000,
        },
@@ -368,6 +374,10 @@ static struct wlcore_conf wl12xx_conf = {
                .increase_time              = 1,
                .window_size                = 16,
        },
+       .recovery = {
+               .bug_on_recovery            = 0,
+               .no_recovery                = 0,
+       },
 };
 
 static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
@@ -601,9 +611,9 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
 {
        int ret;
 
-       if (wl->chip.id != CHIP_ID_1283_PG20) {
+       if (wl->chip.id != CHIP_ID_128X_PG20) {
                struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
-               struct wl127x_rx_mem_pool_addr rx_mem_addr;
+               struct wl12xx_priv *priv = wl->priv;
 
                /*
                 * Choose the block we want to read
@@ -612,13 +622,13 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
                 */
                u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;
 
-               rx_mem_addr.addr = (mem_block << 8) +
+               priv->rx_mem_addr->addr = (mem_block << 8) +
                        le32_to_cpu(wl_mem_map->packet_memory_pool_start);
 
-               rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
+               priv->rx_mem_addr->addr_extra = priv->rx_mem_addr->addr + 4;
 
-               ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
-                                  sizeof(rx_mem_addr), false);
+               ret = wlcore_write(wl, WL1271_SLV_REG_DATA, priv->rx_mem_addr,
+                                  sizeof(*priv->rx_mem_addr), false);
                if (ret < 0)
                        return ret;
        }
@@ -631,13 +641,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
        int ret = 0;
 
        switch (wl->chip.id) {
-       case CHIP_ID_1271_PG10:
+       case CHIP_ID_127X_PG10:
                wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
                               wl->chip.id);
 
                wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
                              WLCORE_QUIRK_DUAL_PROBE_TMPL |
-                             WLCORE_QUIRK_TKIP_HEADER_SPACE;
+                             WLCORE_QUIRK_TKIP_HEADER_SPACE |
+                             WLCORE_QUIRK_START_STA_FAILS |
+                             WLCORE_QUIRK_AP_ZERO_SESSION_ID;
                wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
                wl->mr_fw_name = WL127X_FW_NAME_MULTI;
                memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -646,18 +658,22 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
                /* read data preparation is only needed by wl127x */
                wl->ops->prepare_read = wl127x_prepare_read;
 
-               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
-                                     WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
-                                     WL127X_MINOR_VER);
+               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+                             WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
+                             WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
+                             WL127X_IFTYPE_MR_VER,  WL127X_MAJOR_MR_VER,
+                             WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
                break;
 
-       case CHIP_ID_1271_PG20:
+       case CHIP_ID_127X_PG20:
                wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
                             wl->chip.id);
 
                wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
                              WLCORE_QUIRK_DUAL_PROBE_TMPL |
-                             WLCORE_QUIRK_TKIP_HEADER_SPACE;
+                             WLCORE_QUIRK_TKIP_HEADER_SPACE |
+                             WLCORE_QUIRK_START_STA_FAILS |
+                             WLCORE_QUIRK_AP_ZERO_SESSION_ID;
                wl->plt_fw_name = WL127X_PLT_FW_NAME;
                wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
                wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -667,12 +683,14 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
                /* read data preparation is only needed by wl127x */
                wl->ops->prepare_read = wl127x_prepare_read;
 
-               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
-                                     WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
-                                     WL127X_MINOR_VER);
+               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+                             WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
+                             WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
+                             WL127X_IFTYPE_MR_VER,  WL127X_MAJOR_MR_VER,
+                             WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
                break;
 
-       case CHIP_ID_1283_PG20:
+       case CHIP_ID_128X_PG20:
                wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
                             wl->chip.id);
                wl->plt_fw_name = WL128X_PLT_FW_NAME;
@@ -682,19 +700,29 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
                /* wl128x requires TX blocksize alignment */
                wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
                              WLCORE_QUIRK_DUAL_PROBE_TMPL |
-                             WLCORE_QUIRK_TKIP_HEADER_SPACE;
-
-               wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER,
-                                     WL128X_MAJOR_VER, WL128X_SUBTYPE_VER,
-                                     WL128X_MINOR_VER);
+                             WLCORE_QUIRK_TKIP_HEADER_SPACE |
+                             WLCORE_QUIRK_START_STA_FAILS |
+                             WLCORE_QUIRK_AP_ZERO_SESSION_ID;
+
+               wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
+                             WL128X_IFTYPE_SR_VER,  WL128X_MAJOR_SR_VER,
+                             WL128X_SUBTYPE_SR_VER, WL128X_MINOR_SR_VER,
+                             WL128X_IFTYPE_MR_VER,  WL128X_MAJOR_MR_VER,
+                             WL128X_SUBTYPE_MR_VER, WL128X_MINOR_MR_VER);
                break;
-       case CHIP_ID_1283_PG10:
+       case CHIP_ID_128X_PG10:
        default:
                wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
                ret = -ENODEV;
                goto out;
        }
 
+       /* common settings */
+       wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
+       wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
+       wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+       wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+       wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ;
 out:
        return ret;
 }
@@ -1067,7 +1095,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
        u32 clk;
        int selected_clock = -1;
 
-       if (wl->chip.id == CHIP_ID_1283_PG20) {
+       if (wl->chip.id == CHIP_ID_128X_PG20) {
                ret = wl128x_boot_clk(wl, &selected_clock);
                if (ret < 0)
                        goto out;
@@ -1098,7 +1126,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
 
-       if (wl->chip.id == CHIP_ID_1283_PG20)
+       if (wl->chip.id == CHIP_ID_128X_PG20)
                clk |= ((selected_clock & 0x3) << 1) << 4;
        else
                clk |= (priv->ref_clock << 1) << 4;
@@ -1152,7 +1180,7 @@ static int wl12xx_pre_upload(struct wl1271 *wl)
        /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
         * to upload_fw) */
 
-       if (wl->chip.id == CHIP_ID_1283_PG20) {
+       if (wl->chip.id == CHIP_ID_128X_PG20) {
                ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
                if (ret < 0)
                        goto out;
@@ -1219,6 +1247,23 @@ static int wl12xx_boot(struct wl1271 *wl)
        if (ret < 0)
                goto out;
 
+       wl->event_mask = BSS_LOSE_EVENT_ID |
+               REGAINED_BSS_EVENT_ID |
+               SCAN_COMPLETE_EVENT_ID |
+               ROLE_STOP_COMPLETE_EVENT_ID |
+               RSSI_SNR_TRIGGER_0_EVENT_ID |
+               PSPOLL_DELIVERY_FAILURE_EVENT_ID |
+               SOFT_GEMINI_SENSE_EVENT_ID |
+               PERIODIC_SCAN_REPORT_EVENT_ID |
+               PERIODIC_SCAN_COMPLETE_EVENT_ID |
+               DUMMY_PACKET_EVENT_ID |
+               PEER_REMOVE_COMPLETE_EVENT_ID |
+               BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+               REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+               INACTIVE_STA_EVENT_ID |
+               MAX_TX_RETRY_EVENT_ID |
+               CHANNEL_SWITCH_COMPLETE_EVENT_ID;
+
        ret = wlcore_boot_run_firmware(wl);
        if (ret < 0)
                goto out;
@@ -1261,7 +1306,7 @@ static void
 wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
                          u32 blks, u32 spare_blks)
 {
-       if (wl->chip.id == CHIP_ID_1283_PG20) {
+       if (wl->chip.id == CHIP_ID_128X_PG20) {
                desc->wl128x_mem.total_mem_blocks = blks;
        } else {
                desc->wl127x_mem.extra_blocks = spare_blks;
@@ -1275,7 +1320,7 @@ wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
 {
        u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
 
-       if (wl->chip.id == CHIP_ID_1283_PG20) {
+       if (wl->chip.id == CHIP_ID_128X_PG20) {
                desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
                desc->length = cpu_to_le16(aligned_len >> 2);
 
@@ -1339,7 +1384,7 @@ static int wl12xx_hw_init(struct wl1271 *wl)
 {
        int ret;
 
-       if (wl->chip.id == CHIP_ID_1283_PG20) {
+       if (wl->chip.id == CHIP_ID_128X_PG20) {
                u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
 
                ret = wl128x_cmd_general_parms(wl);
@@ -1394,22 +1439,6 @@ static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
        return wlvif->rate_set;
 }
 
-static int wl12xx_identify_fw(struct wl1271 *wl)
-{
-       unsigned int *fw_ver = wl->chip.fw_ver;
-
-       /* Only new station firmwares support routing fw logs to the host */
-       if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
-           (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
-               wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
-       /* This feature is not yet supported for AP mode */
-       if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
-               wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
-       return 0;
-}
-
 static void wl12xx_conf_init(struct wl1271 *wl)
 {
        struct wl12xx_priv *priv = wl->priv;
@@ -1426,7 +1455,7 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
        bool supported = false;
        u8 major, minor;
 
-       if (wl->chip.id == CHIP_ID_1283_PG20) {
+       if (wl->chip.id == CHIP_ID_128X_PG20) {
                major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
                minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
 
@@ -1482,7 +1511,7 @@ static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
        u16 die_info;
        int ret;
 
-       if (wl->chip.id == CHIP_ID_1283_PG20)
+       if (wl->chip.id == CHIP_ID_128X_PG20)
                ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
                                          &die_info);
        else
@@ -1589,16 +1618,46 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        return wlcore_set_key(wl, cmd, vif, sta, key_conf);
 }
 
+static int wl12xx_set_peer_cap(struct wl1271 *wl,
+                              struct ieee80211_sta_ht_cap *ht_cap,
+                              bool allow_ht_operation,
+                              u32 rate_set, u8 hlid)
+{
+       return wl1271_acx_set_ht_capabilities(wl, ht_cap, allow_ht_operation,
+                                             hlid);
+}
+
+static bool wl12xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+                                struct wl1271_link *lnk)
+{
+       u8 thold;
+
+       if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map))
+               thold = wl->conf.tx.fast_link_thold;
+       else
+               thold = wl->conf.tx.slow_link_thold;
+
+       return lnk->allocated_pkts < thold;
+}
+
+static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+                               struct wl1271_link *lnk)
+{
+       /* any link is good for low priority */
+       return true;
+}
+
 static int wl12xx_setup(struct wl1271 *wl);
 
 static struct wlcore_ops wl12xx_ops = {
        .setup                  = wl12xx_setup,
        .identify_chip          = wl12xx_identify_chip,
-       .identify_fw            = wl12xx_identify_fw,
        .boot                   = wl12xx_boot,
        .plt_init               = wl12xx_plt_init,
        .trigger_cmd            = wl12xx_trigger_cmd,
        .ack_event              = wl12xx_ack_event,
+       .wait_for_event         = wl12xx_wait_for_event,
+       .process_mailbox_events = wl12xx_process_mailbox_events,
        .calc_tx_blocks         = wl12xx_calc_tx_blocks,
        .set_tx_desc_blocks     = wl12xx_set_tx_desc_blocks,
        .set_tx_desc_data_len   = wl12xx_set_tx_desc_data_len,
@@ -1615,9 +1674,17 @@ static struct wlcore_ops wl12xx_ops = {
        .set_rx_csum            = NULL,
        .ap_get_mimo_wide_rate_mask = NULL,
        .debugfs_init           = wl12xx_debugfs_add_files,
+       .scan_start             = wl12xx_scan_start,
+       .scan_stop              = wl12xx_scan_stop,
+       .sched_scan_start       = wl12xx_sched_scan_start,
+       .sched_scan_stop        = wl12xx_scan_sched_scan_stop,
        .get_spare_blocks       = wl12xx_get_spare_blocks,
        .set_key                = wl12xx_set_key,
+       .channel_switch         = wl12xx_cmd_channel_switch,
        .pre_pkt_send           = NULL,
+       .set_peer_cap           = wl12xx_set_peer_cap,
+       .lnk_high_prio          = wl12xx_lnk_high_prio,
+       .lnk_low_prio           = wl12xx_lnk_low_prio,
 };
 
 static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1636,11 +1703,13 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
 static int wl12xx_setup(struct wl1271 *wl)
 {
        struct wl12xx_priv *priv = wl->priv;
-       struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
+       struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+       struct wl12xx_platform_data *pdata = pdev_data->pdata;
 
        wl->rtable = wl12xx_rtable;
        wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
        wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
+       wl->num_channels = 1;
        wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
@@ -1693,6 +1762,10 @@ static int wl12xx_setup(struct wl1271 *wl)
                        wl1271_error("Invalid tcxo parameter %s", tcxo_param);
        }
 
+       priv->rx_mem_addr = kmalloc(sizeof(*priv->rx_mem_addr), GFP_KERNEL);
+       if (!priv->rx_mem_addr)
+               return -ENOMEM;
+
        return 0;
 }
 
@@ -1703,7 +1776,8 @@ static int wl12xx_probe(struct platform_device *pdev)
        int ret;
 
        hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
-                            WL12XX_AGGR_BUFFER_SIZE);
+                            WL12XX_AGGR_BUFFER_SIZE,
+                            sizeof(struct wl12xx_event_mailbox));
        if (IS_ERR(hw)) {
                wl1271_error("can't allocate hw");
                ret = PTR_ERR(hw);
@@ -1725,6 +1799,21 @@ out:
        return ret;
 }
 
+static int wl12xx_remove(struct platform_device *pdev)
+{
+       struct wl1271 *wl = platform_get_drvdata(pdev);
+       struct wl12xx_priv *priv;
+
+       if (!wl)
+               goto out;
+       priv = wl->priv;
+
+       kfree(priv->rx_mem_addr);
+
+out:
+       return wlcore_remove(pdev);
+}
+
 static const struct platform_device_id wl12xx_id_table[] = {
        { "wl12xx", 0 },
        {  } /* Terminating Entry */
@@ -1733,7 +1822,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
 
 static struct platform_driver wl12xx_driver = {
        .probe          = wl12xx_probe,
-       .remove         = wlcore_remove,
+       .remove         = wl12xx_remove,
        .id_table       = wl12xx_id_table,
        .driver = {
                .name   = "wl12xx_driver",
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
new file mode 100644 (file)
index 0000000..affdb3e
--- /dev/null
@@ -0,0 +1,501 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/ieee80211.h>
+#include "scan.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/tx.h"
+
+static int wl1271_get_scan_channels(struct wl1271 *wl,
+                                   struct cfg80211_scan_request *req,
+                                   struct basic_scan_channel_params *channels,
+                                   enum ieee80211_band band, bool passive)
+{
+       struct conf_scan_settings *c = &wl->conf.scan;
+       int i, j;
+       u32 flags;
+
+       for (i = 0, j = 0;
+            i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
+            i++) {
+               flags = req->channels[i]->flags;
+
+               if (!test_bit(i, wl->scan.scanned_ch) &&
+                   !(flags & IEEE80211_CHAN_DISABLED) &&
+                   (req->channels[i]->band == band) &&
+                   /*
+                    * In passive scans, we scan all remaining
+                    * channels, even if not marked as such.
+                    * In active scans, we only scan channels not
+                    * marked as passive.
+                    */
+                   (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
+                       wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
+                                    req->channels[i]->band,
+                                    req->channels[i]->center_freq);
+                       wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
+                                    req->channels[i]->hw_value,
+                                    req->channels[i]->flags);
+                       wl1271_debug(DEBUG_SCAN,
+                                    "max_antenna_gain %d, max_power %d",
+                                    req->channels[i]->max_antenna_gain,
+                                    req->channels[i]->max_power);
+                       wl1271_debug(DEBUG_SCAN, "beacon_found %d",
+                                    req->channels[i]->beacon_found);
+
+                       if (!passive) {
+                               channels[j].min_duration =
+                                       cpu_to_le32(c->min_dwell_time_active);
+                               channels[j].max_duration =
+                                       cpu_to_le32(c->max_dwell_time_active);
+                       } else {
+                               channels[j].min_duration =
+                                       cpu_to_le32(c->dwell_time_passive);
+                               channels[j].max_duration =
+                                       cpu_to_le32(c->dwell_time_passive);
+                       }
+                       channels[j].early_termination = 0;
+                       channels[j].tx_power_att = req->channels[i]->max_power;
+                       channels[j].channel = req->channels[i]->hw_value;
+
+                       memset(&channels[j].bssid_lsb, 0xff, 4);
+                       memset(&channels[j].bssid_msb, 0xff, 2);
+
+                       /* Mark the channels we already used */
+                       set_bit(i, wl->scan.scanned_ch);
+
+                       j++;
+               }
+       }
+
+       return j;
+}
+
+#define WL1271_NOTHING_TO_SCAN 1
+
+static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           enum ieee80211_band band,
+                           bool passive, u32 basic_rate)
+{
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+       struct wl1271_cmd_scan *cmd;
+       struct wl1271_cmd_trigger_scan_to *trigger;
+       int ret;
+       u16 scan_options = 0;
+
+       /* skip active scans if we don't have SSIDs */
+       if (!passive && wl->scan.req->n_ssids == 0)
+               return WL1271_NOTHING_TO_SCAN;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+       if (!cmd || !trigger) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (wl->conf.scan.split_scan_timeout)
+               scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
+
+       if (passive)
+               scan_options |= WL1271_SCAN_OPT_PASSIVE;
+
+       cmd->params.role_id = wlvif->role_id;
+
+       if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cmd->params.scan_options = cpu_to_le16(scan_options);
+
+       cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
+                                                   cmd->channels,
+                                                   band, passive);
+       if (cmd->params.n_ch == 0) {
+               ret = WL1271_NOTHING_TO_SCAN;
+               goto out;
+       }
+
+       cmd->params.tx_rate = cpu_to_le32(basic_rate);
+       cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
+       cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
+       cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
+
+       if (band == IEEE80211_BAND_2GHZ)
+               cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
+       else
+               cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
+
+       if (wl->scan.ssid_len && wl->scan.ssid) {
+               cmd->params.ssid_len = wl->scan.ssid_len;
+               memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
+       }
+
+       memcpy(cmd->addr, vif->addr, ETH_ALEN);
+
+       ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                        cmd->params.role_id, band,
+                                        wl->scan.ssid, wl->scan.ssid_len,
+                                        wl->scan.req->ie,
+                                        wl->scan.req->ie_len, false);
+       if (ret < 0) {
+               wl1271_error("PROBE request template failed");
+               goto out;
+       }
+
+       trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
+       ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
+                             sizeof(*trigger), 0);
+       if (ret < 0) {
+               wl1271_error("trigger scan to failed for hw scan");
+               goto out;
+       }
+
+       wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+       ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("SCAN failed");
+               goto out;
+       }
+
+out:
+       kfree(cmd);
+       kfree(trigger);
+       return ret;
+}
+
+int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       struct wl1271_cmd_header *cmd = NULL;
+       int ret = 0;
+
+       if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
+               return -EINVAL;
+
+       wl1271_debug(DEBUG_CMD, "cmd scan stop");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
+                             sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("cmd stop_scan failed");
+               goto out;
+       }
+out:
+       kfree(cmd);
+       return ret;
+}
+
+void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret = 0;
+       enum ieee80211_band band;
+       u32 rate, mask;
+
+       switch (wl->scan.state) {
+       case WL1271_SCAN_STATE_IDLE:
+               break;
+
+       case WL1271_SCAN_STATE_2GHZ_ACTIVE:
+               band = IEEE80211_BAND_2GHZ;
+               mask = wlvif->bitrate_masks[band];
+               if (wl->scan.req->no_cck) {
+                       mask &= ~CONF_TX_CCK_RATES;
+                       if (!mask)
+                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
+               }
+               rate = wl1271_tx_min_rate_get(wl, mask);
+               ret = wl1271_scan_send(wl, wlvif, band, false, rate);
+               if (ret == WL1271_NOTHING_TO_SCAN) {
+                       wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
+                       wl1271_scan_stm(wl, wlvif);
+               }
+
+               break;
+
+       case WL1271_SCAN_STATE_2GHZ_PASSIVE:
+               band = IEEE80211_BAND_2GHZ;
+               mask = wlvif->bitrate_masks[band];
+               if (wl->scan.req->no_cck) {
+                       mask &= ~CONF_TX_CCK_RATES;
+                       if (!mask)
+                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
+               }
+               rate = wl1271_tx_min_rate_get(wl, mask);
+               ret = wl1271_scan_send(wl, wlvif, band, true, rate);
+               if (ret == WL1271_NOTHING_TO_SCAN) {
+                       if (wl->enable_11a)
+                               wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
+                       else
+                               wl->scan.state = WL1271_SCAN_STATE_DONE;
+                       wl1271_scan_stm(wl, wlvif);
+               }
+
+               break;
+
+       case WL1271_SCAN_STATE_5GHZ_ACTIVE:
+               band = IEEE80211_BAND_5GHZ;
+               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+               ret = wl1271_scan_send(wl, wlvif, band, false, rate);
+               if (ret == WL1271_NOTHING_TO_SCAN) {
+                       wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
+                       wl1271_scan_stm(wl, wlvif);
+               }
+
+               break;
+
+       case WL1271_SCAN_STATE_5GHZ_PASSIVE:
+               band = IEEE80211_BAND_5GHZ;
+               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+               ret = wl1271_scan_send(wl, wlvif, band, true, rate);
+               if (ret == WL1271_NOTHING_TO_SCAN) {
+                       wl->scan.state = WL1271_SCAN_STATE_DONE;
+                       wl1271_scan_stm(wl, wlvif);
+               }
+
+               break;
+
+       case WL1271_SCAN_STATE_DONE:
+               wl->scan.failed = false;
+               cancel_delayed_work(&wl->scan_complete_work);
+               ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+                                            msecs_to_jiffies(0));
+               break;
+
+       default:
+               wl1271_error("invalid scan state");
+               break;
+       }
+
+       if (ret < 0) {
+               cancel_delayed_work(&wl->scan_complete_work);
+               ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+                                            msecs_to_jiffies(0));
+       }
+}
+
+static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
+                                  struct wlcore_scan_channels *cmd_channels)
+{
+       memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
+       memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
+       cmd->dfs = cmd_channels->dfs;
+       cmd->n_pactive_ch = cmd_channels->passive_active;
+
+       memcpy(cmd->channels_2, cmd_channels->channels_2,
+              sizeof(cmd->channels_2));
+       memcpy(cmd->channels_5, cmd_channels->channels_5,
+              sizeof(cmd->channels_5));
+       /* channels_4 are not supported, so no need to copy them */
+}
+
+int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif,
+                                 struct cfg80211_sched_scan_request *req,
+                                 struct ieee80211_sched_scan_ies *ies)
+{
+       struct wl1271_cmd_sched_scan_config *cfg = NULL;
+       struct wlcore_scan_channels *cfg_channels = NULL;
+       struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+       int i, ret;
+       bool force_passive = !req->n_ssids;
+
+       wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
+
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return -ENOMEM;
+
+       cfg->role_id = wlvif->role_id;
+       cfg->rssi_threshold = c->rssi_threshold;
+       cfg->snr_threshold  = c->snr_threshold;
+       cfg->n_probe_reqs = c->num_probe_reqs;
+       /* cycles set to 0 it means infinite (until manually stopped) */
+       cfg->cycles = 0;
+       /* report APs when at least 1 is found */
+       cfg->report_after = 1;
+       /* don't stop scanning automatically when something is found */
+       cfg->terminate = 0;
+       cfg->tag = WL1271_SCAN_DEFAULT_TAG;
+       /* don't filter on BSS type */
+       cfg->bss_type = SCAN_BSS_TYPE_ANY;
+       /* currently NL80211 supports only a single interval */
+       for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
+               cfg->intervals[i] = cpu_to_le32(req->interval);
+
+       cfg->ssid_len = 0;
+       ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
+       if (ret < 0)
+               goto out;
+
+       cfg->filter_type = ret;
+
+       wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
+
+       cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL);
+       if (!cfg_channels) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels,
+                                        req->n_channels, req->n_ssids,
+                                        SCAN_TYPE_PERIODIC)) {
+               wl1271_error("scan channel list is empty");
+               ret = -EINVAL;
+               goto out;
+       }
+       wl12xx_adjust_channels(cfg, cfg_channels);
+
+       if (!force_passive && cfg->active[0]) {
+               u8 band = IEEE80211_BAND_2GHZ;
+               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                                wlvif->role_id, band,
+                                                req->ssids[0].ssid,
+                                                req->ssids[0].ssid_len,
+                                                ies->ie[band],
+                                                ies->len[band], true);
+               if (ret < 0) {
+                       wl1271_error("2.4GHz PROBE request template failed");
+                       goto out;
+               }
+       }
+
+       if (!force_passive && cfg->active[1]) {
+               u8 band = IEEE80211_BAND_5GHZ;
+               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                                wlvif->role_id, band,
+                                                req->ssids[0].ssid,
+                                                req->ssids[0].ssid_len,
+                                                ies->ie[band],
+                                                ies->len[band], true);
+               if (ret < 0) {
+                       wl1271_error("5GHz PROBE request template failed");
+                       goto out;
+               }
+       }
+
+       wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
+
+       ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
+                             sizeof(*cfg), 0);
+       if (ret < 0) {
+               wl1271_error("SCAN configuration failed");
+               goto out;
+       }
+out:
+       kfree(cfg_channels);
+       kfree(cfg);
+       return ret;
+}
+
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       struct wl1271_cmd_sched_scan_start *start;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
+
+       if (wlvif->bss_type != BSS_TYPE_STA_BSS)
+               return -EOPNOTSUPP;
+
+       if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
+           test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+               return -EBUSY;
+
+       start = kzalloc(sizeof(*start), GFP_KERNEL);
+       if (!start)
+               return -ENOMEM;
+
+       start->role_id = wlvif->role_id;
+       start->tag = WL1271_SCAN_DEFAULT_TAG;
+
+       ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
+                             sizeof(*start), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send scan start command");
+               goto out_free;
+       }
+
+out_free:
+       kfree(start);
+       return ret;
+}
+
+int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif  *wlvif,
+                           struct cfg80211_sched_scan_request *req,
+                           struct ieee80211_sched_scan_ies *ies)
+{
+       int ret;
+
+       ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
+       if (ret < 0)
+               return ret;
+
+       return wl1271_scan_sched_scan_start(wl, wlvif);
+}
+
+void wl12xx_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif)
+{
+       struct wl1271_cmd_sched_scan_stop *stop;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
+
+       /* FIXME: what to do if alloc'ing to stop fails? */
+       stop = kzalloc(sizeof(*stop), GFP_KERNEL);
+       if (!stop) {
+               wl1271_error("failed to alloc memory to send sched scan stop");
+               return;
+       }
+
+       stop->role_id = wlvif->role_id;
+       stop->tag = WL1271_SCAN_DEFAULT_TAG;
+
+       ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
+                             sizeof(*stop), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send sched scan stop command");
+               goto out_free;
+       }
+
+out_free:
+       kfree(stop);
+}
+
+int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct cfg80211_scan_request *req)
+{
+       wl1271_scan_stm(wl, wlvif);
+       return 0;
+}
+
+void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       wl1271_scan_stm(wl, wlvif);
+}
diff --git a/drivers/net/wireless/ti/wl12xx/scan.h b/drivers/net/wireless/ti/wl12xx/scan.h
new file mode 100644 (file)
index 0000000..264af7a
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_SCAN_H__
+#define __WL12XX_SCAN_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/scan.h"
+
+#define WL12XX_MAX_CHANNELS_5GHZ 23
+
+struct basic_scan_params {
+       /* Scan option flags (WL1271_SCAN_OPT_*) */
+       __le16 scan_options;
+       u8 role_id;
+       /* Number of scan channels in the list (maximum 30) */
+       u8 n_ch;
+       /* This field indicates the number of probe requests to send
+          per channel for an active scan */
+       u8 n_probe_reqs;
+       u8 tid_trigger;
+       u8 ssid_len;
+       u8 use_ssid_list;
+
+       /* Rate bit field for sending the probes */
+       __le32 tx_rate;
+
+       u8 ssid[IEEE80211_MAX_SSID_LEN];
+       /* Band to scan */
+       u8 band;
+
+       u8 scan_tag;
+       u8 padding2[2];
+} __packed;
+
+struct basic_scan_channel_params {
+       /* Duration in TU to wait for frames on a channel for active scan */
+       __le32 min_duration;
+       __le32 max_duration;
+       __le32 bssid_lsb;
+       __le16 bssid_msb;
+       u8 early_termination;
+       u8 tx_power_att;
+       u8 channel;
+       /* FW internal use only! */
+       u8 dfs_candidate;
+       u8 activity_detected;
+       u8 pad;
+} __packed;
+
+struct wl1271_cmd_scan {
+       struct wl1271_cmd_header header;
+
+       struct basic_scan_params params;
+       struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
+
+       /* src mac address */
+       u8 addr[ETH_ALEN];
+       u8 padding[2];
+} __packed;
+
+struct wl1271_cmd_sched_scan_config {
+       struct wl1271_cmd_header header;
+
+       __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
+
+       s8 rssi_threshold; /* for filtering (in dBm) */
+       s8 snr_threshold;  /* for filtering (in dB) */
+
+       u8 cycles;       /* maximum number of scan cycles */
+       u8 report_after; /* report when this number of results are received */
+       u8 terminate;    /* stop scanning after reporting */
+
+       u8 tag;
+       u8 bss_type; /* for filtering */
+       u8 filter_type;
+
+       u8 ssid_len;     /* For SCAN_SSID_FILTER_SPECIFIC */
+       u8 ssid[IEEE80211_MAX_SSID_LEN];
+
+       u8 n_probe_reqs; /* Number of probes requests per channel */
+
+       u8 passive[SCAN_MAX_BANDS];
+       u8 active[SCAN_MAX_BANDS];
+
+       u8 dfs;
+
+       u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
+                           channels in BG band */
+       u8 role_id;
+       u8 padding[1];
+       struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+       struct conn_scan_ch_params channels_5[WL12XX_MAX_CHANNELS_5GHZ];
+       struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+} __packed;
+
+struct wl1271_cmd_sched_scan_start {
+       struct wl1271_cmd_header header;
+
+       u8 tag;
+       u8 role_id;
+       u8 padding[2];
+} __packed;
+
+struct wl1271_cmd_sched_scan_stop {
+       struct wl1271_cmd_header header;
+
+       u8 tag;
+       u8 role_id;
+       u8 padding[2];
+} __packed;
+
+int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct cfg80211_scan_request *req);
+int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif  *wlvif,
+                           struct cfg80211_sched_scan_request *req,
+                           struct ieee80211_sched_scan_ies *ies);
+void wl12xx_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif);
+#endif
index 7182bbf..d455285 100644 (file)
 
 #include "conf.h"
 
-/* minimum FW required for driver for wl127x */
+/* WiLink 6/7 chip IDs */
+#define CHIP_ID_127X_PG10              (0x04030101)
+#define CHIP_ID_127X_PG20              (0x04030111)
+#define CHIP_ID_128X_PG10              (0x05030101)
+#define CHIP_ID_128X_PG20              (0x05030111)
+
+/* FW chip version for wl127x */
 #define WL127X_CHIP_VER                6
-#define WL127X_IFTYPE_VER      3
-#define WL127X_MAJOR_VER       10
-#define WL127X_SUBTYPE_VER     2
-#define WL127X_MINOR_VER       115
+/* minimum single-role FW version for wl127x */
+#define WL127X_IFTYPE_SR_VER   3
+#define WL127X_MAJOR_SR_VER    10
+#define WL127X_SUBTYPE_SR_VER  WLCORE_FW_VER_IGNORE
+#define WL127X_MINOR_SR_VER    115
+/* minimum multi-role FW version for wl127x */
+#define WL127X_IFTYPE_MR_VER   5
+#define WL127X_MAJOR_MR_VER    7
+#define WL127X_SUBTYPE_MR_VER  WLCORE_FW_VER_IGNORE
+#define WL127X_MINOR_MR_VER    115
 
-/* minimum FW required for driver for wl128x */
+/* FW chip version for wl128x */
 #define WL128X_CHIP_VER                7
-#define WL128X_IFTYPE_VER      3
-#define WL128X_MAJOR_VER       10
-#define WL128X_SUBTYPE_VER     2
-#define WL128X_MINOR_VER       115
+/* minimum single-role FW version for wl128x */
+#define WL128X_IFTYPE_SR_VER   3
+#define WL128X_MAJOR_SR_VER    10
+#define WL128X_SUBTYPE_SR_VER  WLCORE_FW_VER_IGNORE
+#define WL128X_MINOR_SR_VER    115
+/* minimum multi-role FW version for wl128x */
+#define WL128X_IFTYPE_MR_VER   5
+#define WL128X_MAJOR_MR_VER    7
+#define WL128X_SUBTYPE_MR_VER  WLCORE_FW_VER_IGNORE
+#define WL128X_MINOR_MR_VER    42
 
 #define WL12XX_AGGR_BUFFER_SIZE        (4 * PAGE_SIZE)
 
@@ -55,6 +73,8 @@ struct wl12xx_priv {
 
        int ref_clock;
        int tcxo_clock;
+
+       struct wl127x_rx_mem_pool_addr *rx_mem_addr;
 };
 
 #endif /* __WL12XX_PRIV_H__ */
index 67c0987..ae2b817 100644 (file)
@@ -1,3 +1,3 @@
-wl18xx-objs    = main.o acx.o tx.o io.o debugfs.o
+wl18xx-objs    = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
 
 obj-$(CONFIG_WL18XX)           += wl18xx.o
index 72840e2..a169bb5 100644 (file)
@@ -75,7 +75,7 @@ int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
 
        acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
 
-       ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
+       ret = wl1271_cmd_configure(wl, ACX_CSUM_CONFIG, acx, sizeof(*acx));
        if (ret < 0) {
                wl1271_warning("failed to set Tx checksum state: %d", ret);
                goto out;
@@ -109,3 +109,88 @@ out:
        kfree(acx);
        return ret;
 }
+
+int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide)
+{
+       struct wlcore_peer_ht_operation_mode *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx peer ht operation mode hlid %d bw %d",
+                    hlid, wide);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->hlid = hlid;
+       acx->bandwidth = wide ? WLCORE_BANDWIDTH_40MHZ : WLCORE_BANDWIDTH_20MHZ;
+
+       ret = wl1271_cmd_configure(wl, ACX_PEER_HT_OPERATION_MODE_CFG, acx,
+                                  sizeof(*acx));
+
+       if (ret < 0) {
+               wl1271_warning("acx peer ht operation mode failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+
+}
+
+/*
+ * this command is basically the same as wl1271_acx_ht_capabilities,
+ * with the addition of supported rates. they should be unified in
+ * the next fw api change
+ */
+int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
+                           struct ieee80211_sta_ht_cap *ht_cap,
+                           bool allow_ht_operation,
+                           u32 rate_set, u8 hlid)
+{
+       struct wlcore_acx_peer_cap *acx;
+       int ret = 0;
+       u32 ht_capabilites = 0;
+
+       wl1271_debug(DEBUG_ACX,
+                    "acx set cap ht_supp: %d ht_cap: %d rates: 0x%x",
+                    ht_cap->ht_supported, ht_cap->cap, rate_set);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (allow_ht_operation && ht_cap->ht_supported) {
+               /* no need to translate capabilities - use the spec values */
+               ht_capabilites = ht_cap->cap;
+
+               /*
+                * this bit is not employed by the spec but only by FW to
+                * indicate peer HT support
+                */
+               ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
+
+               /* get data from A-MPDU parameters field */
+               acx->ampdu_max_length = ht_cap->ampdu_factor;
+               acx->ampdu_min_spacing = ht_cap->ampdu_density;
+       }
+
+       acx->hlid = hlid;
+       acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+       acx->supported_rates = cpu_to_le32(rate_set);
+
+       ret = wl1271_cmd_configure(wl, ACX_PEER_CAP, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx ht capabilities setting failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
index e2609a6..0e636de 100644 (file)
 #include "../wlcore/acx.h"
 
 enum {
-       ACX_CLEAR_STATISTICS             = 0x0047,
+       ACX_NS_IPV6_FILTER               = 0x0050,
+       ACX_PEER_HT_OPERATION_MODE_CFG   = 0x0051,
+       ACX_CSUM_CONFIG                  = 0x0052,
+       ACX_SIM_CONFIG                   = 0x0053,
+       ACX_CLEAR_STATISTICS             = 0x0054,
+       ACX_AUTO_RX_STREAMING            = 0x0055,
+       ACX_PEER_CAP                     = 0x0056
 };
 
 /* numbers of bits the length field takes (add 1 for the actual number) */
@@ -278,10 +284,57 @@ struct wl18xx_acx_clear_statistics {
        struct acx_header header;
 };
 
+enum wlcore_bandwidth {
+       WLCORE_BANDWIDTH_20MHZ,
+       WLCORE_BANDWIDTH_40MHZ,
+};
+
+struct wlcore_peer_ht_operation_mode {
+       struct acx_header header;
+
+       u8 hlid;
+       u8 bandwidth; /* enum wlcore_bandwidth */
+       u8 padding[2];
+};
+
+/*
+ * ACX_PEER_CAP
+ * this struct is very similar to wl1271_acx_ht_capabilities, with the
+ * addition of supported rates
+ */
+struct wlcore_acx_peer_cap {
+       struct acx_header header;
+
+       /* bitmask of capability bits supported by the peer */
+       __le32 ht_capabilites;
+
+       /* rates supported by the remote peer */
+       __le32 supported_rates;
+
+       /* Indicates to which link these capabilities apply. */
+       u8 hlid;
+
+       /*
+        * This the maximum A-MPDU length supported by the AP. The FW may not
+        * exceed this length when sending A-MPDUs
+        */
+       u8 ampdu_max_length;
+
+       /* This is the minimal spacing required when sending A-MPDUs to the AP*/
+       u8 ampdu_min_spacing;
+
+       u8 padding;
+} __packed;
+
 int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
                                  u32 sdio_blk_size, u32 extra_mem_blks,
                                  u32 len_field_size);
 int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
 int wl18xx_acx_clear_statistics(struct wl1271 *wl);
+int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide);
+int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
+                           struct ieee80211_sta_ht_cap *ht_cap,
+                           bool allow_ht_operation,
+                           u32 rate_set, u8 hlid);
 
 #endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
new file mode 100644 (file)
index 0000000..1d1f6cc
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/hw_ops.h"
+
+#include "cmd.h"
+
+int wl18xx_cmd_channel_switch(struct wl1271 *wl,
+                             struct wl12xx_vif *wlvif,
+                             struct ieee80211_channel_switch *ch_switch)
+{
+       struct wl18xx_cmd_channel_switch *cmd;
+       u32 supported_rates;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "cmd channel switch");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->role_id = wlvif->role_id;
+       cmd->channel = ch_switch->channel->hw_value;
+       cmd->switch_time = ch_switch->count;
+       cmd->stop_tx = ch_switch->block_tx;
+
+       switch (ch_switch->channel->band) {
+       case IEEE80211_BAND_2GHZ:
+               cmd->band = WLCORE_BAND_2_4GHZ;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               cmd->band = WLCORE_BAND_5GHZ;
+               break;
+       default:
+               wl1271_error("invalid channel switch band: %d",
+                            ch_switch->channel->band);
+               ret = -EINVAL;
+               goto out_free;
+       }
+
+       supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
+                         wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+       if (wlvif->p2p)
+               supported_rates &= ~CONF_TX_CCK_RATES;
+       cmd->local_supported_rates = cpu_to_le32(supported_rates);
+       cmd->channel_type = wlvif->channel_type;
+
+       ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send channel switch command");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+out:
+       return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h
new file mode 100644 (file)
index 0000000..6687d10
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_CMD_H__
+#define __WL18XX_CMD_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+struct wl18xx_cmd_channel_switch {
+       struct wl1271_cmd_header header;
+
+       u8 role_id;
+
+       /* The new serving channel */
+       u8 channel;
+       /* Relative time of the serving channel switch in TBTT units */
+       u8 switch_time;
+       /* Stop the role TX, should expect it after radar detection */
+       u8 stop_tx;
+
+       __le32 local_supported_rates;
+
+       u8 channel_type;
+       u8 band;
+
+       u8 padding[2];
+} __packed;
+
+int wl18xx_cmd_channel_switch(struct wl1271 *wl,
+                             struct wl12xx_vif *wlvif,
+                             struct ieee80211_channel_switch *ch_switch);
+
+#endif
index 4d426cc..e34302e 100644 (file)
 #define __WL18XX_CONF_H__
 
 #define WL18XX_CONF_MAGIC      0x10e100ca
-#define WL18XX_CONF_VERSION    (WLCORE_CONF_VERSION | 0x0003)
+#define WL18XX_CONF_VERSION    (WLCORE_CONF_VERSION | 0x0006)
 #define WL18XX_CONF_MASK       0x0000ffff
 #define WL18XX_CONF_SIZE       (WLCORE_CONF_SIZE + \
                                 sizeof(struct wl18xx_priv_conf))
 
 #define NUM_OF_CHANNELS_11_ABG 150
 #define NUM_OF_CHANNELS_11_P 7
-#define WL18XX_NUM_OF_SUB_BANDS 9
 #define SRF_TABLE_LEN 16
 #define PIN_MUXING_SIZE 2
+#define WL18XX_TRACE_LOSS_GAPS_TX 10
+#define WL18XX_TRACE_LOSS_GAPS_RX 18
 
 struct wl18xx_mac_and_phy_params {
        u8 phy_standalone;
-       u8 rdl;
+       u8 spare0;
        u8 enable_clpc;
        u8 enable_tx_low_pwr_on_siso_rdl;
        u8 auto_detect;
@@ -69,18 +70,27 @@ struct wl18xx_mac_and_phy_params {
        u8 pwr_limit_reference_11_abg;
        u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
        u8 pwr_limit_reference_11p;
-       u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
-       u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+       u8 spare1;
+       u8 per_chan_bo_mode_11_abg[13];
+       u8 per_chan_bo_mode_11_p[4];
        u8 primary_clock_setting_time;
        u8 clock_valid_on_wake_up;
        u8 secondary_clock_setting_time;
        u8 board_type;
        /* enable point saturation */
        u8 psat;
-       /* low/medium/high Tx power in dBm */
+       /* low/medium/high Tx power in dBm for STA-HP BG */
        s8 low_power_val;
        s8 med_power_val;
        s8 high_power_val;
+       s8 per_sub_band_tx_trace_loss[WL18XX_TRACE_LOSS_GAPS_TX];
+       s8 per_sub_band_rx_trace_loss[WL18XX_TRACE_LOSS_GAPS_RX];
+       u8 tx_rf_margin;
+       /* low/medium/high Tx power in dBm for other role */
+       s8 low_power_val_2nd;
+       s8 med_power_val_2nd;
+       s8 high_power_val_2nd;
+
        u8 padding[1];
 } __packed;
 
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
new file mode 100644 (file)
index 0000000..c9199d7
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "event.h"
+#include "scan.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+                         bool *timeout)
+{
+       u32 local_event;
+
+       switch (event) {
+       case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
+               local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
+               break;
+
+       case WLCORE_EVENT_DFS_CONFIG_COMPLETE:
+               local_event = DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+               break;
+
+       default:
+               /* event not implemented */
+               return 0;
+       }
+       return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
+}
+
+int wl18xx_process_mailbox_events(struct wl1271 *wl)
+{
+       struct wl18xx_event_mailbox *mbox = wl->mbox;
+       u32 vector;
+
+       vector = le32_to_cpu(mbox->events_vector);
+       wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
+
+       if (vector & SCAN_COMPLETE_EVENT_ID) {
+               wl1271_debug(DEBUG_EVENT, "scan results: %d",
+                            mbox->number_of_scan_results);
+
+               if (wl->scan_wlvif)
+                       wl18xx_scan_completed(wl, wl->scan_wlvif);
+       }
+
+       if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
+               wl1271_debug(DEBUG_EVENT,
+                            "PERIODIC_SCAN_REPORT_EVENT (results %d)",
+                            mbox->number_of_sched_scan_results);
+
+               wlcore_scan_sched_scan_results(wl);
+       }
+
+       if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
+               wlcore_event_sched_scan_completed(wl, 1);
+
+       if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
+               wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
+
+       if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
+               wlcore_event_ba_rx_constraint(wl,
+                               le16_to_cpu(mbox->rx_ba_role_id_bitmap),
+                               le16_to_cpu(mbox->rx_ba_allowed_bitmap));
+
+       if (vector & BSS_LOSS_EVENT_ID)
+               wlcore_event_beacon_loss(wl,
+                                        le16_to_cpu(mbox->bss_loss_bitmap));
+
+       if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
+               wlcore_event_channel_switch(wl,
+                       le16_to_cpu(mbox->channel_switch_role_id_bitmap),
+                       true);
+
+       if (vector & DUMMY_PACKET_EVENT_ID)
+               wlcore_event_dummy_packet(wl);
+
+       /*
+        * "TX retries exceeded" has a different meaning according to mode.
+        * In AP mode the offending station is disconnected.
+        */
+       if (vector & MAX_TX_FAILURE_EVENT_ID)
+               wlcore_event_max_tx_failure(wl,
+                               le32_to_cpu(mbox->tx_retry_exceeded_bitmap));
+
+       if (vector & INACTIVE_STA_EVENT_ID)
+               wlcore_event_inactive_sta(wl,
+                               le32_to_cpu(mbox->inactive_sta_bitmap));
+
+       if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
+               wlcore_event_roc_complete(wl);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
new file mode 100644 (file)
index 0000000..398f3d2
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_EVENT_H__
+#define __WL18XX_EVENT_H__
+
+#include "../wlcore/wlcore.h"
+
+enum {
+       SCAN_COMPLETE_EVENT_ID                   = BIT(8),
+       RADAR_DETECTED_EVENT_ID                  = BIT(9),
+       CHANNEL_SWITCH_COMPLETE_EVENT_ID         = BIT(10),
+       BSS_LOSS_EVENT_ID                        = BIT(11),
+       MAX_TX_FAILURE_EVENT_ID                  = BIT(12),
+       DUMMY_PACKET_EVENT_ID                    = BIT(13),
+       INACTIVE_STA_EVENT_ID                    = BIT(14),
+       PEER_REMOVE_COMPLETE_EVENT_ID            = BIT(15),
+       PERIODIC_SCAN_COMPLETE_EVENT_ID          = BIT(16),
+       BA_SESSION_RX_CONSTRAINT_EVENT_ID        = BIT(17),
+       REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
+       DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
+       PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
+};
+
+struct wl18xx_event_mailbox {
+       __le32 events_vector;
+
+       u8 number_of_scan_results;
+       u8 number_of_sched_scan_results;
+
+       __le16 channel_switch_role_id_bitmap;
+
+       s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
+
+       /* bitmap of removed links */
+       __le32 hlid_removed_bitmap;
+
+       /* rx ba constraint */
+       __le16 rx_ba_role_id_bitmap; /* 0xfff means any role. */
+       __le16 rx_ba_allowed_bitmap;
+
+       /* bitmap of roc completed (by role id) */
+       __le16 roc_completed_bitmap;
+
+       /* bitmap of stations (by role id) with bss loss */
+       __le16 bss_loss_bitmap;
+
+       /* bitmap of stations (by HLID) which exceeded max tx retries */
+       __le32 tx_retry_exceeded_bitmap;
+
+       /* bitmap of inactive stations (by HLID) */
+       __le32 inactive_sta_bitmap;
+} __packed;
+
+int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+                         bool *timeout);
+int wl18xx_process_mailbox_events(struct wl1271 *wl);
+
+#endif
index 8d8c1f8..da3ef1b 100644 (file)
 
 #include "reg.h"
 #include "conf.h"
+#include "cmd.h"
 #include "acx.h"
 #include "tx.h"
 #include "wl18xx.h"
 #include "io.h"
+#include "scan.h"
+#include "event.h"
 #include "debugfs.h"
 
 #define WL18XX_RX_CHECKSUM_MASK      0x40
@@ -334,6 +337,8 @@ static struct wlcore_conf wl18xx_conf = {
                .tmpl_short_retry_limit      = 10,
                .tmpl_long_retry_limit       = 10,
                .tx_watchdog_timeout         = 5000,
+               .slow_link_thold             = 3,
+               .fast_link_thold             = 30,
        },
        .conn = {
                .wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
@@ -391,8 +396,10 @@ static struct wlcore_conf wl18xx_conf = {
        .scan = {
                .min_dwell_time_active        = 7500,
                .max_dwell_time_active        = 30000,
-               .min_dwell_time_passive       = 100000,
-               .max_dwell_time_passive       = 100000,
+               .min_dwell_time_active_long   = 25000,
+               .max_dwell_time_active_long   = 50000,
+               .dwell_time_passive           = 100000,
+               .dwell_time_dfs               = 150000,
                .num_probe_reqs               = 2,
                .split_scan_timeout           = 50000,
        },
@@ -489,6 +496,10 @@ static struct wlcore_conf wl18xx_conf = {
                .increase_time              = 1,
                .window_size                = 16,
        },
+       .recovery = {
+               .bug_on_recovery            = 0,
+               .no_recovery                = 0,
+       },
 };
 
 static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
@@ -501,7 +512,6 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
                .clock_valid_on_wake_up         = 0x00,
                .secondary_clock_setting_time   = 0x05,
                .board_type                     = BOARD_TYPE_HDK_18XX,
-               .rdl                            = 0x01,
                .auto_detect                    = 0x00,
                .dedicated_fem                  = FEM_NONE,
                .low_band_component             = COMPONENT_3_WAY_SWITCH,
@@ -517,14 +527,44 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
                .enable_clpc                    = 0x00,
                .enable_tx_low_pwr_on_siso_rdl  = 0x00,
                .rx_profile                     = 0x00,
-               .pwr_limit_reference_11_abg     = 0xc8,
+               .pwr_limit_reference_11_abg     = 0x64,
+               .per_chan_pwr_limit_arr_11abg   = {
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+               .pwr_limit_reference_11p        = 0x64,
+               .per_chan_bo_mode_11_abg        = { 0x00, 0x00, 0x00, 0x00,
+                                                   0x00, 0x00, 0x00, 0x00,
+                                                   0x00, 0x00, 0x00, 0x00,
+                                                   0x00 },
+               .per_chan_bo_mode_11_p          = { 0x00, 0x00, 0x00, 0x00 },
+               .per_chan_pwr_limit_arr_11p     = { 0xff, 0xff, 0xff, 0xff,
+                                                   0xff, 0xff, 0xff },
                .psat                           = 0,
-               .low_power_val                  = 0x00,
-               .med_power_val                  = 0x0a,
-               .high_power_val                 = 0x1e,
+               .low_power_val                  = 0x08,
+               .med_power_val                  = 0x12,
+               .high_power_val                 = 0x18,
+               .low_power_val_2nd              = 0x05,
+               .med_power_val_2nd              = 0x0a,
+               .high_power_val_2nd             = 0x14,
                .external_pa_dc2dc              = 0,
-               .number_of_assembled_ant2_4     = 1,
+               .number_of_assembled_ant2_4     = 2,
                .number_of_assembled_ant5       = 1,
+               .tx_rf_margin                   = 1,
        },
 };
 
@@ -595,7 +635,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
 };
 
 /* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
 
 static int wl18xx_identify_chip(struct wl1271 *wl)
 {
@@ -608,15 +648,18 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
                wl->sr_fw_name = WL18XX_FW_NAME;
                /* wl18xx uses the same firmware for PLT */
                wl->plt_fw_name = WL18XX_FW_NAME;
-               wl->quirks |= WLCORE_QUIRK_NO_ELP |
-                             WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
+               wl->quirks |= WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
                              WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
                              WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
-                             WLCORE_QUIRK_TX_PAD_LAST_FRAME;
-
-               wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER,
-                                     WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER,
-                                     WL18XX_MINOR_VER);
+                             WLCORE_QUIRK_TX_PAD_LAST_FRAME |
+                             WLCORE_QUIRK_REGDOMAIN_CONF |
+                             WLCORE_QUIRK_DUAL_PROBE_TMPL;
+
+               wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER,
+                                     WL18XX_IFTYPE_VER,  WL18XX_MAJOR_VER,
+                                     WL18XX_SUBTYPE_VER, WL18XX_MINOR_VER,
+                                     /* there's no separate multi-role FW */
+                                     0, 0, 0, 0);
                break;
        case CHIP_ID_185x_PG10:
                wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
@@ -630,6 +673,11 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
                goto out;
        }
 
+       wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+       wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+       wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
+       wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC;
+       wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ;
 out:
        return ret;
 }
@@ -843,6 +891,20 @@ static int wl18xx_boot(struct wl1271 *wl)
        if (ret < 0)
                goto out;
 
+       wl->event_mask = BSS_LOSS_EVENT_ID |
+               SCAN_COMPLETE_EVENT_ID |
+               RSSI_SNR_TRIGGER_0_EVENT_ID |
+               PERIODIC_SCAN_COMPLETE_EVENT_ID |
+               PERIODIC_SCAN_REPORT_EVENT_ID |
+               DUMMY_PACKET_EVENT_ID |
+               PEER_REMOVE_COMPLETE_EVENT_ID |
+               BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+               REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+               INACTIVE_STA_EVENT_ID |
+               MAX_TX_FAILURE_EVENT_ID |
+               CHANNEL_SWITCH_COMPLETE_EVENT_ID |
+               DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+
        ret = wlcore_boot_run_firmware(wl);
        if (ret < 0)
                goto out;
@@ -964,7 +1026,7 @@ static int wl18xx_hw_init(struct wl1271 *wl)
 
        /* (re)init private structures. Relevant on recovery as well. */
        priv->last_fw_rls_idx = 0;
-       priv->extra_spare_vif_count = 0;
+       priv->extra_spare_key_count = 0;
 
        /* set the default amount of spare blocks in the bitmap */
        ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
@@ -1022,7 +1084,12 @@ static bool wl18xx_is_mimo_supported(struct wl1271 *wl)
 {
        struct wl18xx_priv *priv = wl->priv;
 
-       return priv->conf.phy.number_of_assembled_ant2_4 >= 2;
+       /* only support MIMO with multiple antennas, and when SISO
+        * is not forced through config
+        */
+       return (priv->conf.phy.number_of_assembled_ant2_4 >= 2) &&
+              (priv->conf.ht.mode != HT_MODE_WIDE) &&
+              (priv->conf.ht.mode != HT_MODE_SISO20);
 }
 
 /*
@@ -1223,8 +1290,8 @@ static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
 {
        struct wl18xx_priv *priv = wl->priv;
 
-       /* If we have VIFs requiring extra spare, indulge them */
-       if (priv->extra_spare_vif_count)
+       /* If we have keys requiring extra spare, indulge them */
+       if (priv->extra_spare_key_count)
                return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
 
        return WL18XX_TX_HW_BLOCK_SPARE;
@@ -1236,42 +1303,48 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                          struct ieee80211_key_conf *key_conf)
 {
        struct wl18xx_priv *priv = wl->priv;
-       bool change_spare = false;
+       bool change_spare = false, special_enc;
        int ret;
 
+       wl1271_debug(DEBUG_CRYPT, "extra spare keys before: %d",
+                    priv->extra_spare_key_count);
+
+       special_enc = key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+                     key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
+
+       ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
+       if (ret < 0)
+               goto out;
+
        /*
-        * when adding the first or removing the last GEM/TKIP interface,
+        * when adding the first or removing the last GEM/TKIP key,
         * we have to adjust the number of spare blocks.
         */
-       change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
-               key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
-               ((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
-                (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));
+       if (special_enc) {
+               if (cmd == SET_KEY) {
+                       /* first key */
+                       change_spare = (priv->extra_spare_key_count == 0);
+                       priv->extra_spare_key_count++;
+               } else if (cmd == DISABLE_KEY) {
+                       /* last key */
+                       change_spare = (priv->extra_spare_key_count == 1);
+                       priv->extra_spare_key_count--;
+               }
+       }
 
-       /* no need to change spare - just regular set_key */
-       if (!change_spare)
-               return wlcore_set_key(wl, cmd, vif, sta, key_conf);
+       wl1271_debug(DEBUG_CRYPT, "extra spare keys after: %d",
+                    priv->extra_spare_key_count);
 
-       ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
-       if (ret < 0)
+       if (!change_spare)
                goto out;
 
        /* key is now set, change the spare blocks */
-       if (cmd == SET_KEY) {
+       if (priv->extra_spare_key_count)
                ret = wl18xx_set_host_cfg_bitmap(wl,
                                        WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
-               if (ret < 0)
-                       goto out;
-
-               priv->extra_spare_vif_count++;
-       } else {
+       else
                ret = wl18xx_set_host_cfg_bitmap(wl,
                                        WL18XX_TX_HW_BLOCK_SPARE);
-               if (ret < 0)
-                       goto out;
-
-               priv->extra_spare_vif_count--;
-       }
 
 out:
        return ret;
@@ -1296,6 +1369,92 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
        return buf_offset;
 }
 
+static void wl18xx_sta_rc_update(struct wl1271 *wl,
+                                struct wl12xx_vif *wlvif,
+                                struct ieee80211_sta *sta,
+                                u32 changed)
+{
+       bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide);
+
+       if (!(changed & IEEE80211_RC_BW_CHANGED))
+               return;
+
+       mutex_lock(&wl->mutex);
+
+       /* sanity */
+       if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
+               goto out;
+
+       /* ignore the change before association */
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+               goto out;
+
+       /*
+        * If we started out as wide, we can change the operation mode. If we
+        * thought this was a 20mhz AP, we have to reconnect
+        */
+       if (wlvif->sta.role_chan_type == NL80211_CHAN_HT40MINUS ||
+           wlvif->sta.role_chan_type == NL80211_CHAN_HT40PLUS)
+               wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide);
+       else
+               ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif));
+
+out:
+       mutex_unlock(&wl->mutex);
+}
+
+static int wl18xx_set_peer_cap(struct wl1271 *wl,
+                              struct ieee80211_sta_ht_cap *ht_cap,
+                              bool allow_ht_operation,
+                              u32 rate_set, u8 hlid)
+{
+       return wl18xx_acx_set_peer_cap(wl, ht_cap, allow_ht_operation,
+                                      rate_set, hlid);
+}
+
+static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+                                struct wl1271_link *lnk)
+{
+       u8 thold;
+       struct wl18xx_fw_status_priv *status_priv =
+               (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+       u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+
+       /* suspended links are never high priority */
+       if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+               return false;
+
+       /* the priority thresholds are taken from FW */
+       if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
+           !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+               thold = status_priv->tx_fast_link_prio_threshold;
+       else
+               thold = status_priv->tx_slow_link_prio_threshold;
+
+       return lnk->allocated_pkts < thold;
+}
+
+static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+                               struct wl1271_link *lnk)
+{
+       u8 thold;
+       struct wl18xx_fw_status_priv *status_priv =
+               (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+       u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+
+       if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+               thold = status_priv->tx_suspend_threshold;
+       else if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
+                !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+               thold = status_priv->tx_fast_stop_threshold;
+       else
+               thold = status_priv->tx_slow_stop_threshold;
+
+       return lnk->allocated_pkts < thold;
+}
+
 static int wl18xx_setup(struct wl1271 *wl);
 
 static struct wlcore_ops wl18xx_ops = {
@@ -1305,6 +1464,8 @@ static struct wlcore_ops wl18xx_ops = {
        .plt_init       = wl18xx_plt_init,
        .trigger_cmd    = wl18xx_trigger_cmd,
        .ack_event      = wl18xx_ack_event,
+       .wait_for_event = wl18xx_wait_for_event,
+       .process_mailbox_events = wl18xx_process_mailbox_events,
        .calc_tx_blocks = wl18xx_calc_tx_blocks,
        .set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
        .set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
@@ -1320,16 +1481,26 @@ static struct wlcore_ops wl18xx_ops = {
        .ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
        .get_mac        = wl18xx_get_mac,
        .debugfs_init   = wl18xx_debugfs_add_files,
+       .scan_start     = wl18xx_scan_start,
+       .scan_stop      = wl18xx_scan_stop,
+       .sched_scan_start       = wl18xx_sched_scan_start,
+       .sched_scan_stop        = wl18xx_scan_sched_scan_stop,
        .handle_static_data     = wl18xx_handle_static_data,
        .get_spare_blocks = wl18xx_get_spare_blocks,
        .set_key        = wl18xx_set_key,
+       .channel_switch = wl18xx_cmd_channel_switch,
        .pre_pkt_send   = wl18xx_pre_pkt_send,
+       .sta_rc_update  = wl18xx_sta_rc_update,
+       .set_peer_cap   = wl18xx_set_peer_cap,
+       .lnk_high_prio  = wl18xx_lnk_high_prio,
+       .lnk_low_prio   = wl18xx_lnk_low_prio,
 };
 
 /* HT cap appropriate for wide channels in 2Ghz */
 static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
        .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
-              IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
+              IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 |
+              IEEE80211_HT_CAP_GRN_FLD,
        .ht_supported = true,
        .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
        .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1343,7 +1514,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
 /* HT cap appropriate for wide channels in 5Ghz */
 static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
        .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
-              IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+              IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+              IEEE80211_HT_CAP_GRN_FLD,
        .ht_supported = true,
        .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
        .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1356,7 +1528,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
 
 /* HT cap appropriate for SISO 20 */
 static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
-       .cap = IEEE80211_HT_CAP_SGI_20,
+       .cap = IEEE80211_HT_CAP_SGI_20 |
+              IEEE80211_HT_CAP_GRN_FLD,
        .ht_supported = true,
        .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
        .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1369,7 +1542,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
 
 /* HT cap appropriate for MIMO rates in 20mhz channel */
 static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
-       .cap = IEEE80211_HT_CAP_SGI_20,
+       .cap = IEEE80211_HT_CAP_SGI_20 |
+              IEEE80211_HT_CAP_GRN_FLD,
        .ht_supported = true,
        .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
        .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1387,7 +1561,8 @@ static int wl18xx_setup(struct wl1271 *wl)
 
        wl->rtable = wl18xx_rtable;
        wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
-       wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+       wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
+       wl->num_channels = 2;
        wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
@@ -1506,7 +1681,8 @@ static int wl18xx_probe(struct platform_device *pdev)
        int ret;
 
        hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
-                            WL18XX_AGGR_BUFFER_SIZE);
+                            WL18XX_AGGR_BUFFER_SIZE,
+                            sizeof(struct wl18xx_event_mailbox));
        if (IS_ERR(hw)) {
                wl1271_error("can't allocate hw");
                ret = PTR_ERR(hw);
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
new file mode 100644 (file)
index 0000000..09d9445
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/ieee80211.h>
+#include "scan.h"
+#include "../wlcore/debug.h"
+
+static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
+                                  struct wlcore_scan_channels *cmd_channels)
+{
+       memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
+       memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
+       cmd->dfs = cmd_channels->dfs;
+       cmd->passive_active = cmd_channels->passive_active;
+
+       memcpy(cmd->channels_2, cmd_channels->channels_2,
+              sizeof(cmd->channels_2));
+       memcpy(cmd->channels_5, cmd_channels->channels_5,
+              sizeof(cmd->channels_5));
+       /* channels_4 are not supported, so no need to copy them */
+}
+
+static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           struct cfg80211_scan_request *req)
+{
+       struct wl18xx_cmd_scan_params *cmd;
+       struct wlcore_scan_channels *cmd_channels = NULL;
+       int ret;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->role_id = wlvif->role_id;
+
+       if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cmd->scan_type = SCAN_TYPE_SEARCH;
+       cmd->rssi_threshold = -127;
+       cmd->snr_threshold = 0;
+
+       cmd->bss_type = SCAN_BSS_TYPE_ANY;
+
+       cmd->ssid_from_list = 0;
+       cmd->filter = 0;
+       cmd->add_broadcast = 0;
+
+       cmd->urgency = 0;
+       cmd->protect = 0;
+
+       cmd->n_probe_reqs = wl->conf.scan.num_probe_reqs;
+       cmd->terminate_after = 0;
+
+       /* configure channels */
+       WARN_ON(req->n_ssids > 1);
+
+       cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
+       if (!cmd_channels) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
+                                   req->n_channels, req->n_ssids,
+                                   SCAN_TYPE_SEARCH);
+       wl18xx_adjust_channels(cmd, cmd_channels);
+
+       /*
+        * all the cycles params (except total cycles) should
+        * remain 0 for normal scan
+        */
+       cmd->total_cycles = 1;
+
+       if (req->no_cck)
+               cmd->rate = WL18XX_SCAN_RATE_6;
+
+       cmd->tag = WL1271_SCAN_DEFAULT_TAG;
+
+       if (req->n_ssids) {
+               cmd->ssid_len = req->ssids[0].ssid_len;
+               memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len);
+       }
+
+       /* TODO: per-band ies? */
+       if (cmd->active[0]) {
+               u8 band = IEEE80211_BAND_2GHZ;
+               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                cmd->role_id, band,
+                                req->ssids ? req->ssids[0].ssid : NULL,
+                                req->ssids ? req->ssids[0].ssid_len : 0,
+                                req->ie,
+                                req->ie_len,
+                                false);
+               if (ret < 0) {
+                       wl1271_error("2.4GHz PROBE request template failed");
+                       goto out;
+               }
+       }
+
+       if (cmd->active[1] || cmd->dfs) {
+               u8 band = IEEE80211_BAND_5GHZ;
+               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                cmd->role_id, band,
+                                req->ssids ? req->ssids[0].ssid : NULL,
+                                req->ssids ? req->ssids[0].ssid_len : 0,
+                                req->ie,
+                                req->ie_len,
+                                false);
+               if (ret < 0) {
+                       wl1271_error("5GHz PROBE request template failed");
+                       goto out;
+               }
+       }
+
+       wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+       ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("SCAN failed");
+               goto out;
+       }
+
+out:
+       kfree(cmd_channels);
+       kfree(cmd);
+       return ret;
+}
+
+void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       wl->scan.failed = false;
+       cancel_delayed_work(&wl->scan_complete_work);
+       ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+                                    msecs_to_jiffies(0));
+}
+
+static
+int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif,
+                                 struct cfg80211_sched_scan_request *req,
+                                 struct ieee80211_sched_scan_ies *ies)
+{
+       struct wl18xx_cmd_scan_params *cmd;
+       struct wlcore_scan_channels *cmd_channels = NULL;
+       struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+       int ret;
+       int filter_type;
+
+       wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
+
+       filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
+       if (filter_type < 0)
+               return filter_type;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->role_id = wlvif->role_id;
+
+       if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cmd->scan_type = SCAN_TYPE_PERIODIC;
+       cmd->rssi_threshold = c->rssi_threshold;
+       cmd->snr_threshold = c->snr_threshold;
+
+       /* don't filter on BSS type */
+       cmd->bss_type = SCAN_BSS_TYPE_ANY;
+
+       cmd->ssid_from_list = 1;
+       if (filter_type == SCAN_SSID_FILTER_LIST)
+               cmd->filter = 1;
+       cmd->add_broadcast = 0;
+
+       cmd->urgency = 0;
+       cmd->protect = 0;
+
+       cmd->n_probe_reqs = c->num_probe_reqs;
+       /* don't stop scanning automatically when something is found */
+       cmd->terminate_after = 0;
+
+       cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
+       if (!cmd_channels) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* configure channels */
+       wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
+                                   req->n_channels, req->n_ssids,
+                                   SCAN_TYPE_PERIODIC);
+       wl18xx_adjust_channels(cmd, cmd_channels);
+
+       cmd->short_cycles_sec = 0;
+       cmd->long_cycles_sec = cpu_to_le16(req->interval);
+       cmd->short_cycles_count = 0;
+
+       cmd->total_cycles = 0;
+
+       cmd->tag = WL1271_SCAN_DEFAULT_TAG;
+
+       /* create a PERIODIC_SCAN_REPORT_EVENT whenever we've got a match */
+       cmd->report_threshold = 1;
+       cmd->terminate_on_report = 0;
+
+       if (cmd->active[0]) {
+               u8 band = IEEE80211_BAND_2GHZ;
+               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                cmd->role_id, band,
+                                req->ssids ? req->ssids[0].ssid : NULL,
+                                req->ssids ? req->ssids[0].ssid_len : 0,
+                                ies->ie[band],
+                                ies->len[band],
+                                true);
+               if (ret < 0) {
+                       wl1271_error("2.4GHz PROBE request template failed");
+                       goto out;
+               }
+       }
+
+       if (cmd->active[1] || cmd->dfs) {
+               u8 band = IEEE80211_BAND_5GHZ;
+               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+                                cmd->role_id, band,
+                                req->ssids ? req->ssids[0].ssid : NULL,
+                                req->ssids ? req->ssids[0].ssid_len : 0,
+                                ies->ie[band],
+                                ies->len[band],
+                                true);
+               if (ret < 0) {
+                       wl1271_error("5GHz PROBE request template failed");
+                       goto out;
+               }
+       }
+
+       wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+       ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("SCAN failed");
+               goto out;
+       }
+
+out:
+       kfree(cmd_channels);
+       kfree(cmd);
+       return ret;
+}
+
+int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           struct cfg80211_sched_scan_request *req,
+                           struct ieee80211_sched_scan_ies *ies)
+{
+       return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies);
+}
+
+static int __wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              u8 scan_type)
+{
+       struct wl18xx_cmd_scan_stop *stop;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
+
+       stop = kzalloc(sizeof(*stop), GFP_KERNEL);
+       if (!stop) {
+               wl1271_error("failed to alloc memory to send sched scan stop");
+               return -ENOMEM;
+       }
+
+       stop->role_id = wlvif->role_id;
+       stop->scan_type = scan_type;
+
+       ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, stop, sizeof(*stop), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send sched scan stop command");
+               goto out_free;
+       }
+
+out_free:
+       kfree(stop);
+       return ret;
+}
+
+void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_PERIODIC);
+}
+int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct cfg80211_scan_request *req)
+{
+       return wl18xx_scan_send(wl, wlvif, req);
+}
+
+int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       return __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_SEARCH);
+}
diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h
new file mode 100644 (file)
index 0000000..eadee42
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_SCAN_H__
+#define __WL18XX_SCAN_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/scan.h"
+
+struct tracking_ch_params {
+       struct conn_scan_ch_params channel;
+
+       __le32 bssid_lsb;
+       __le16 bssid_msb;
+
+       u8 padding[2];
+} __packed;
+
+/* probe request rate */
+enum
+{
+       WL18XX_SCAN_RATE_1      = 0,
+       WL18XX_SCAN_RATE_5_5    = 1,
+       WL18XX_SCAN_RATE_6      = 2,
+};
+
+#define WL18XX_MAX_CHANNELS_5GHZ 32
+
+struct wl18xx_cmd_scan_params {
+       struct wl1271_cmd_header header;
+
+       u8 role_id;
+       u8 scan_type;
+
+       s8 rssi_threshold; /* for filtering (in dBm) */
+       s8 snr_threshold;  /* for filtering (in dB) */
+
+       u8 bss_type;       /* for filtering */
+       u8 ssid_from_list; /* use ssid from configured ssid list */
+       u8 filter;         /* forward only results with matching ssids */
+
+       /*
+        * add broadcast ssid in addition to the configured ssids.
+        * the driver should add a dummy entry for it -- TODO confirm.
+        */
+       u8 add_broadcast;
+
+       u8 urgency;
+       u8 protect;      /* semantics unknown -- TODO confirm vs. FW API */
+       u8 n_probe_reqs;    /* Number of probe requests per channel */
+       u8 terminate_after; /* early terminate scan operation */
+
+       u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */
+       u8 active[SCAN_MAX_BANDS];  /* number of active scan channels */
+       u8 dfs;            /* number of dfs channels in 5ghz */
+       u8 passive_active; /* number of passive before active channels 2.4ghz */
+
+       __le16 short_cycles_sec;
+       __le16 long_cycles_sec;
+       u8 short_cycles_count;
+       u8 total_cycles; /* number of scan cycles; 0 means run forever */
+       u8 padding[2];
+
+       union {
+               struct {
+                       struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+                       struct conn_scan_ch_params channels_5[WL18XX_MAX_CHANNELS_5GHZ];
+                       struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+               };
+               struct tracking_ch_params channels_tracking[WL1271_SCAN_MAX_CHANNELS];
+       } ;
+
+       u8 ssid[IEEE80211_MAX_SSID_LEN];
+       u8 ssid_len;     /* For SCAN_SSID_FILTER_SPECIFIC */
+       u8 tag;
+       u8 rate;
+
+       /* send SCAN_REPORT_EVENT in periodic scans after each cycle
+        * if number of results >= report_threshold. Must be 0 for
+        * non periodic scans
+        */
+       u8 report_threshold;
+
+       /* Should periodic scan stop after a report event was created.
+        * Must be 0 for non periodic scans.
+        */
+       u8 terminate_on_report;
+
+       u8 padding1[3];
+} __packed;
+
+struct wl18xx_cmd_scan_stop {
+       struct wl1271_cmd_header header;
+
+       u8 role_id;
+       u8 scan_type;
+       u8 padding[2];
+} __packed;
+
+int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct cfg80211_scan_request *req);
+int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           struct cfg80211_sched_scan_request *req,
+                           struct ieee80211_sched_scan_ies *ies);
+void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+#endif
index 5b1fb10..57c6943 100644 (file)
 #include "wl18xx.h"
 #include "tx.h"
 
+static
+void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
+                            struct ieee80211_tx_rate *rate)
+{
+       u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+
+       if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
+               wl1271_error("last Tx rate invalid: %d", fw_rate);
+               rate->idx = 0;
+               rate->flags = 0;
+               return;
+       }
+
+       if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
+               rate->idx = fw_rate;
+               rate->flags = 0;
+       } else {
+               rate->flags = IEEE80211_TX_RC_MCS;
+               rate->idx = fw_rate - CONF_HW_RATE_INDEX_MCS0;
+
+               /* SGI modifier is counted as a separate rate */
+               if (fw_rate >= CONF_HW_RATE_INDEX_MCS7_SGI)
+                       (rate->idx)--;
+               if (fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
+                       (rate->idx)--;
+
+               /* this also covers the 40Mhz SGI case (= MCS15) */
+               if (fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI ||
+                   fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
+                       rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+               if (fw_rate > CONF_HW_RATE_INDEX_MCS7_SGI && vif) {
+                       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+                       if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+                           wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+                               /* adjustment needed for range 0-7 */
+                               rate->idx -= 8;
+                               rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+                       }
+               }
+       }
+}
+
 static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
 {
        struct ieee80211_tx_info *info;
@@ -44,7 +87,6 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
        /* a zero bit indicates Tx success */
        tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));
 
-
        skb = wl->tx_frames[id];
        info = IEEE80211_SKB_CB(skb);
 
@@ -56,11 +98,13 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
        /* update the TX status info */
        if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
                info->flags |= IEEE80211_TX_STAT_ACK;
+       /*
+        * first pass info->control.vif while it's valid, and then fill out
+        * the info->status structures
+        */
+       wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]);
 
-       /* no real data about Tx completion */
-       info->status.rates[0].idx = -1;
-       info->status.rates[0].count = 0;
-       info->status.rates[0].flags = 0;
+       info->status.rates[0].count = 1; /* no data about retries */
        info->status.ack_signal = -1;
 
        if (!tx_success)
index 96a1e43..b6739e7 100644 (file)
 
 /* minimum FW required for driver */
 #define WL18XX_CHIP_VER                8
-#define WL18XX_IFTYPE_VER      2
-#define WL18XX_MAJOR_VER       0
-#define WL18XX_SUBTYPE_VER     0
-#define WL18XX_MINOR_VER       100
+#define WL18XX_IFTYPE_VER      5
+#define WL18XX_MAJOR_VER       WLCORE_FW_VER_IGNORE
+#define WL18XX_SUBTYPE_VER     WLCORE_FW_VER_IGNORE
+#define WL18XX_MINOR_VER       28
 
 #define WL18XX_CMD_MAX_SIZE          740
 
@@ -49,8 +49,8 @@ struct wl18xx_priv {
        /* Index of last released Tx desc in FW */
        u8 last_fw_rls_idx;
 
-       /* number of VIFs requiring extra spare mem-blocks */
-       int extra_spare_vif_count;
+       /* number of keys requiring extra spare mem-blocks */
+       int extra_spare_key_count;
 };
 
 #define WL18XX_FW_MAX_TX_STATUS_DESC 33
@@ -68,7 +68,43 @@ struct wl18xx_fw_status_priv {
         */
        u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];
 
-       u8 padding[2];
+       /* A bitmap representing the currently suspended links. The suspend
+        * is short lived, for multi-channel Tx requirements.
+        */
+       __le32 link_suspend_bitmap;
+
+       /* packet threshold for an "almost empty" AC,
+        * for Tx scheduling purposes
+        */
+       u8 tx_ac_threshold;
+
+       /* number of packets to queue up for a link in PS */
+       u8 tx_ps_threshold;
+
+       /* number of packets to queue up for a suspended link */
+       u8 tx_suspend_threshold;
+
+       /* Should have less than this number of packets in queue of a slow
+        * link to qualify as high priority link
+        */
+       u8 tx_slow_link_prio_threshold;
+
+       /* Should have less than this number of packets in queue of a fast
+        * link to qualify as high priority link
+        */
+       u8 tx_fast_link_prio_threshold;
+
+       /* Should have less than this number of packets in queue of a slow
+        * link before we stop queuing up packets for it.
+        */
+       u8 tx_slow_stop_threshold;
+
+       /* Should have less than this number of packets in queue of a fast
+        * link before we stop queuing up packets for it.
+        */
+       u8 tx_fast_stop_threshold;
+
+       u8 padding[3];
 };
 
 #define WL18XX_PHY_VERSION_MAX_LEN 20
index d7b907e..2b83282 100644 (file)
@@ -33,8 +33,3 @@ config WLCORE_SDIO
 
          If you choose to build a module, it'll be called wlcore_sdio.
          Say N if unsure.
-
-config WL12XX_PLATFORM_DATA
-       bool
-       depends on WLCORE_SDIO != n || WL1251_SDIO != n
-       default y
index d9fba9e..b21398f 100644 (file)
@@ -9,7 +9,4 @@ obj-$(CONFIG_WLCORE)                    += wlcore.o
 obj-$(CONFIG_WLCORE_SPI)               += wlcore_spi.o
 obj-$(CONFIG_WLCORE_SDIO)              += wlcore_sdio.o
 
-# small builtin driver bit
-obj-$(CONFIG_WL12XX_PLATFORM_DATA)     += wl12xx_platform_data.o
-
 ccflags-y += -D__CHECK_ENDIAN__
index ce108a7..c796543 100644 (file)
@@ -1340,6 +1340,8 @@ out:
        kfree(acx);
        return ret;
 }
+EXPORT_SYMBOL_GPL(wl1271_acx_set_ht_capabilities);
+
 
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
                                   struct wl12xx_vif *wlvif,
@@ -1433,13 +1435,22 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
        acx->win_size = wl->conf.ht.rx_ba_win_size;
        acx->ssn = ssn;
 
-       ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
-                                  sizeof(*acx));
+       ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
+                                           sizeof(*acx),
+                                           BIT(CMD_STATUS_NO_RX_BA_SESSION));
        if (ret < 0) {
                wl1271_warning("acx ba receiver session failed: %d", ret);
                goto out;
        }
 
+       /* sometimes we can't start the session */
+       if (ret == CMD_STATUS_NO_RX_BA_SESSION) {
+               wl1271_warning("no fw rx ba on tid %d", tid_index);
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = 0;
 out:
        kfree(acx);
        return ret;
index d03215d..126536c 100644 (file)
@@ -1025,7 +1025,6 @@ enum {
        ACX_CONFIG_HANGOVER              = 0x0042,
        ACX_FEATURE_CFG                  = 0x0043,
        ACX_PROTECTION_CFG               = 0x0044,
-       ACX_CHECKSUM_CONFIG              = 0x0045,
 };
 
 
index 375ea57..77752b0 100644 (file)
@@ -84,47 +84,57 @@ out:
 static int wlcore_validate_fw_ver(struct wl1271 *wl)
 {
        unsigned int *fw_ver = wl->chip.fw_ver;
-       unsigned int *min_ver = wl->min_fw_ver;
+       unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
+               wl->min_mr_fw_ver : wl->min_sr_fw_ver;
+       char min_fw_str[32] = "";
+       int i;
 
        /* the chip must be exactly equal */
-       if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])
+       if ((min_ver[FW_VER_CHIP] != WLCORE_FW_VER_IGNORE) &&
+           (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP]))
                goto fail;
 
-       /* always check the next digit if all previous ones are equal */
-
-       if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE])
-               goto out;
-       else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE])
+       /* the firmware type must be equal */
+       if ((min_ver[FW_VER_IF_TYPE] != WLCORE_FW_VER_IGNORE) &&
+           (min_ver[FW_VER_IF_TYPE] != fw_ver[FW_VER_IF_TYPE]))
                goto fail;
 
-       if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR])
-               goto out;
-       else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])
+       /* the project number must be equal */
+       if ((min_ver[FW_VER_SUBTYPE] != WLCORE_FW_VER_IGNORE) &&
+           (min_ver[FW_VER_SUBTYPE] != fw_ver[FW_VER_SUBTYPE]))
                goto fail;
 
-       if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE])
-               goto out;
-       else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE])
+       /* the API version must be greater or equal */
+       if ((min_ver[FW_VER_MAJOR] != WLCORE_FW_VER_IGNORE) &&
+                (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR]))
                goto fail;
 
-       if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR])
-               goto out;
-       else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])
+       /* if the API version is equal... */
+       if (((min_ver[FW_VER_MAJOR] == WLCORE_FW_VER_IGNORE) ||
+            (min_ver[FW_VER_MAJOR] == fw_ver[FW_VER_MAJOR])) &&
+           /* ...the minor must be greater or equal */
+           ((min_ver[FW_VER_MINOR] != WLCORE_FW_VER_IGNORE) &&
+            (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])))
                goto fail;
 
-out:
        return 0;
 
 fail:
-       wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n"
-                    "Please use at least FW %u.%u.%u.%u.%u.\n"
-                    "You can get more information at:\n"
-                    "http://wireless.kernel.org/en/users/Drivers/wl12xx",
+       for (i = 0; i < NUM_FW_VER; i++)
+               if (min_ver[i] == WLCORE_FW_VER_IGNORE)
+                       snprintf(min_fw_str, sizeof(min_fw_str),
+                                 "%s*.", min_fw_str);
+               else
+                       snprintf(min_fw_str, sizeof(min_fw_str),
+                                 "%s%u.", min_fw_str, min_ver[i]);
+
+       wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
+                    "Please use at least FW %s\n"
+                    "You can get the latest firmwares at:\n"
+                    "git://github.com/TI-OpenLink/firmwares.git",
                     fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
                     fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
-                    fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP],
-                    min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR],
-                    min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]);
+                    fw_ver[FW_VER_MINOR], min_fw_str);
        return -EINVAL;
 }
 
@@ -491,7 +501,7 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
+       wl->mbox_ptr[1] = wl->mbox_ptr[0] + wl->mbox_size;
 
        wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
                     wl->mbox_ptr[0], wl->mbox_ptr[1]);
@@ -508,23 +518,6 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
         */
 
        /* unmask required mbox events  */
-       wl->event_mask = BSS_LOSE_EVENT_ID |
-               REGAINED_BSS_EVENT_ID |
-               SCAN_COMPLETE_EVENT_ID |
-               ROLE_STOP_COMPLETE_EVENT_ID |
-               RSSI_SNR_TRIGGER_0_EVENT_ID |
-               PSPOLL_DELIVERY_FAILURE_EVENT_ID |
-               SOFT_GEMINI_SENSE_EVENT_ID |
-               PERIODIC_SCAN_REPORT_EVENT_ID |
-               PERIODIC_SCAN_COMPLETE_EVENT_ID |
-               DUMMY_PACKET_EVENT_ID |
-               PEER_REMOVE_COMPLETE_EVENT_ID |
-               BA_SESSION_RX_CONSTRAINT_EVENT_ID |
-               REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
-               INACTIVE_STA_EVENT_ID |
-               MAX_TX_RETRY_EVENT_ID |
-               CHANNEL_SWITCH_COMPLETE_EVENT_ID;
-
        ret = wl1271_event_unmask(wl);
        if (ret < 0) {
                wl1271_error("EVENT mask setting failed");
index 27f83f7..6331f9e 100644 (file)
  * @id: command id
  * @buf: buffer containing the command, must work with dma
  * @len: length of the buffer
+ * return the cmd status code on success.
  */
-int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
-                   size_t res_len)
+static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
+                            size_t len, size_t res_len)
 {
        struct wl1271_cmd_header *cmd;
        unsigned long timeout;
        u32 intr;
-       int ret = 0;
+       int ret;
        u16 status;
        u16 poll_count = 0;
 
@@ -71,7 +72,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
 
        ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
        if (ret < 0)
-               goto fail;
+               return ret;
 
        /*
         * TODO: we just need this because one bit is in a different
@@ -79,19 +80,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
         */
        ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
        if (ret < 0)
-               goto fail;
+               return ret;
 
        timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
 
        ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
        if (ret < 0)
-               goto fail;
+               return ret;
 
        while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
                if (time_after(jiffies, timeout)) {
                        wl1271_error("command complete timeout");
-                       ret = -ETIMEDOUT;
-                       goto fail;
+                       return -ETIMEDOUT;
                }
 
                poll_count++;
@@ -102,7 +102,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
 
                ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
                if (ret < 0)
-                       goto fail;
+                       return ret;
        }
 
        /* read back the status code of the command */
@@ -111,33 +111,66 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
 
        ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
        if (ret < 0)
-               goto fail;
+               return ret;
 
        status = le16_to_cpu(cmd->status);
-       if (status != CMD_STATUS_SUCCESS) {
-               wl1271_error("command execute failure %d", status);
-               ret = -EIO;
-               goto fail;
-       }
 
        ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
                               WL1271_ACX_INTR_CMD_COMPLETE);
        if (ret < 0)
+               return ret;
+
+       return status;
+}
+
+/*
+ * send command to fw and return cmd status on success
+ * valid_rets contains a bitmap of allowed error codes
+ */
+int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
+                            size_t res_len, unsigned long valid_rets)
+{
+       int ret = __wlcore_cmd_send(wl, id, buf, len, res_len);
+
+       if (ret < 0)
                goto fail;
 
-       return 0;
+       /* success is always a valid status */
+       valid_rets |= BIT(CMD_STATUS_SUCCESS);
 
+       if (ret >= MAX_COMMAND_STATUS ||
+           !test_bit(ret, &valid_rets)) {
+               wl1271_error("command execute failure %d", ret);
+               ret = -EIO;
+               goto fail;
+       }
+       return ret;
 fail:
        wl12xx_queue_recovery_work(wl);
        return ret;
 }
+EXPORT_SYMBOL_GPL(wl1271_cmd_send);
+
+/*
+ * wrapper for wlcore_cmd_send that accept only CMD_STATUS_SUCCESS
+ * return 0 on success.
+ */
+int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
+                   size_t res_len)
+{
+       int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, res_len, 0);
+
+       if (ret < 0)
+               return ret;
+       return 0;
+}
 
 /*
  * Poll the mailbox event field until any of the bits in the mask is set or a
  * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
  */
-static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
-                                               u32 mask, bool *timeout)
+int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+                                        u32 mask, bool *timeout)
 {
        u32 *events_vector;
        u32 event;
@@ -187,20 +220,7 @@ out:
        kfree(events_vector);
        return ret;
 }
-
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
-{
-       int ret;
-       bool timeout = false;
-
-       ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
-       if (ret != 0 || timeout) {
-               wl12xx_queue_recovery_work(wl);
-               return ret;
-       }
-
-       return 0;
-}
+EXPORT_SYMBOL_GPL(wlcore_cmd_wait_for_event_or_timeout);
 
 int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
                           u8 *role_id)
@@ -278,6 +298,16 @@ out:
        return ret;
 }
 
+static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
+{
+       if (wl->session_ids[hlid] >= SESSION_COUNTER_MAX)
+               wl->session_ids[hlid] = 0;
+
+       wl->session_ids[hlid]++;
+
+       return wl->session_ids[hlid];
+}
+
 int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
        unsigned long flags;
@@ -285,12 +315,21 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
        if (link >= WL12XX_MAX_LINKS)
                return -EBUSY;
 
+       wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
+
        /* these bits are used by op_tx */
        spin_lock_irqsave(&wl->wl_lock, flags);
        __set_bit(link, wl->links_map);
        __set_bit(link, wlvif->links_map);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       /* take the last "freed packets" value from the current FW status */
+       wl->links[link].prev_freed_pkts =
+                       wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+       wl->links[link].wlvif = wlvif;
        *hlid = link;
+
+       wl->active_link_count++;
        return 0;
 }
 
@@ -307,24 +346,21 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
        __clear_bit(*hlid, wlvif->links_map);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
+       wl->links[*hlid].allocated_pkts = 0;
+       wl->links[*hlid].prev_freed_pkts = 0;
+       wl->links[*hlid].ba_bitmap = 0;
+       memset(wl->links[*hlid].addr, 0, ETH_ALEN);
+
        /*
         * At this point op_tx() will not add more packets to the queues. We
         * can purge them.
         */
        wl1271_tx_reset_link_queues(wl, *hlid);
+       wl->links[*hlid].wlvif = NULL;
 
        *hlid = WL12XX_INVALID_LINK_ID;
-}
-
-static int wl12xx_get_new_session_id(struct wl1271 *wl,
-                                    struct wl12xx_vif *wlvif)
-{
-       if (wlvif->session_counter >= SESSION_COUNTER_MAX)
-               wlvif->session_counter = 0;
-
-       wlvif->session_counter++;
-
-       return wlvif->session_counter;
+       wl->active_link_count--;
+       WARN_ON_ONCE(wl->active_link_count < 0);
 }
 
 static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
@@ -345,7 +381,9 @@ static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
 }
 
 static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
-                                    struct wl12xx_vif *wlvif)
+                                    struct wl12xx_vif *wlvif,
+                                    enum ieee80211_band band,
+                                    int channel)
 {
        struct wl12xx_cmd_role_start *cmd;
        int ret;
@@ -359,9 +397,9 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
        wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
 
        cmd->role_id = wlvif->dev_role_id;
-       if (wlvif->band == IEEE80211_BAND_5GHZ)
+       if (band == IEEE80211_BAND_5GHZ)
                cmd->band = WLCORE_BAND_5GHZ;
-       cmd->channel = wlvif->channel;
+       cmd->channel = channel;
 
        if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
                ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
@@ -369,7 +407,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
                        goto out_free;
        }
        cmd->device.hlid = wlvif->dev_hlid;
-       cmd->device.session = wl12xx_get_new_session_id(wl, wlvif);
+       cmd->device.session = wl->session_ids[wlvif->dev_hlid];
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
                     cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -420,12 +458,6 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
                goto out_free;
        }
 
-       ret = wl1271_cmd_wait_for_event(wl, ROLE_STOP_COMPLETE_EVENT_ID);
-       if (ret < 0) {
-               wl1271_error("cmd role stop dev event completion error");
-               goto out_free;
-       }
-
        wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
@@ -439,6 +471,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_cmd_role_start *cmd;
+       u32 supported_rates;
        int ret;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -459,7 +492,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        cmd->sta.ssid_len = wlvif->ssid_len;
        memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
        memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
-       cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
+
+       supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
+                         wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+       if (wlvif->p2p)
+               supported_rates &= ~CONF_TX_CCK_RATES;
+
+       cmd->sta.local_rates = cpu_to_le32(supported_rates);
+
        cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
 
        if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
@@ -468,8 +508,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                        goto out_free;
        }
        cmd->sta.hlid = wlvif->sta.hlid;
-       cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
-       cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
+       cmd->sta.session = wl->session_ids[wlvif->sta.hlid];
+       /*
+        * We don't have the correct remote rates in this stage.  The
+        * rates will be reconfigured later, after association, if the
+        * firmware supports ACX_PEER_CAP.  Otherwise, there's nothing
+        * we can do, so use all supported_rates here.
+        */
+       cmd->sta.remote_rates = cpu_to_le32(supported_rates);
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
                     "basic_rate_set: 0x%x, remote_rates: 0x%x",
@@ -482,6 +528,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                goto err_hlid;
        }
 
+       wlvif->sta.role_chan_type = wlvif->channel_type;
        goto out_free;
 
 err_hlid:
@@ -500,7 +547,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
-       bool timeout = false;
 
        if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
                return -EINVAL;
@@ -523,17 +569,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                goto out_free;
        }
 
-       /*
-        * Sometimes the firmware doesn't send this event, so we just
-        * time out without failing.  Queue recovery for other
-        * failures.
-        */
-       ret = wl1271_cmd_wait_for_event_or_timeout(wl,
-                                                  ROLE_STOP_COMPLETE_EVENT_ID,
-                                                  &timeout);
-       if (ret)
-               wl12xx_queue_recovery_work(wl);
-
        wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
@@ -579,12 +614,15 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
        cmd->ap.global_hlid = wlvif->ap.global_hlid;
        cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+       cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid];
+       cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid];
        cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
        cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->ap.dtim_interval = bss_conf->dtim_period;
        cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
        /* FIXME: Change when adding DFS */
        cmd->ap.reset_tsf = 1;  /* By default reset AP TSF */
+       cmd->ap.wmm = wlvif->wmm_enabled;
        cmd->channel = wlvif->channel;
        cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
 
@@ -599,8 +637,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
        }
 
-       supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
+       supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
                wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+       if (wlvif->p2p)
+               supported_rates &= ~CONF_TX_CCK_RATES;
 
        wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
                     supported_rates);
@@ -799,8 +839,11 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
  * @id: acx id
  * @buf: buffer containing acx, including all headers, must work with dma
  * @len: length of buf
+ * @valid_rets: bitmap of valid cmd status codes (i.e. return values).
+ * return the cmd status on success.
  */
-int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
+                                 size_t len, unsigned long valid_rets)
 {
        struct acx_header *acx = buf;
        int ret;
@@ -812,12 +855,26 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
        /* payload length, does not include any headers */
        acx->len = cpu_to_le16(len - sizeof(*acx));
 
-       ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0);
+       ret = wlcore_cmd_send_failsafe(wl, CMD_CONFIGURE, acx, len, 0,
+                                      valid_rets);
        if (ret < 0) {
                wl1271_warning("CONFIGURE command NOK");
                return ret;
        }
 
+       return ret;
+}
+
+/*
+ * wrapper for wlcore_cmd_configure that accepts only success status.
+ * return 0 on success
+ */
+int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
+{
+       int ret = wlcore_cmd_configure_failsafe(wl, id, buf, len, 0);
+
+       if (ret < 0)
+               return ret;
        return 0;
 }
 EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
@@ -1034,8 +1091,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        struct sk_buff *skb;
        int ret;
        u32 rate;
-       u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
-       u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+       u16 template_id_2_4 = wl->scan_templ_id_2_4;
+       u16 template_id_5 = wl->scan_templ_id_5;
 
        skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
                                     ie_len);
@@ -1048,10 +1105,10 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
        wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
 
-       if (!sched_scan &&
+       if (sched_scan &&
            (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
-               template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4;
-               template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5;
+               template_id_2_4 = wl->sched_scan_templ_id_2_4;
+               template_id_5 = wl->sched_scan_templ_id_5;
        }
 
        rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
@@ -1068,6 +1125,7 @@ out:
        dev_kfree_skb(skb);
        return ret;
 }
+EXPORT_SYMBOL_GPL(wl12xx_cmd_build_probe_req);
 
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
                                              struct wl12xx_vif *wlvif,
@@ -1379,7 +1437,8 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             u8 hlid)
 {
        struct wl12xx_cmd_set_peer_state *cmd;
        int ret = 0;
@@ -1395,6 +1454,10 @@ int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
        cmd->hlid = hlid;
        cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
 
+       /* wmm param is valid only for station role */
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+               cmd->wmm = wlvif->wmm_enabled;
+
        ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
                wl1271_error("failed to send set peer state command");
@@ -1429,6 +1492,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        cmd->hlid = hlid;
        cmd->sp_len = sta->max_sp;
        cmd->wmm = sta->wme ? 1 : 0;
+       cmd->session_id = wl->session_ids[hlid];
 
        for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
                if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1490,9 +1554,10 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
                goto out_free;
        }
 
-       ret = wl1271_cmd_wait_for_event_or_timeout(wl,
-                                          PEER_REMOVE_COMPLETE_EVENT_ID,
-                                          &timeout);
+       ret = wl->ops->wait_for_event(wl,
+                                     WLCORE_EVENT_PEER_REMOVE_COMPLETE,
+                                     &timeout);
+
        /*
         * We are ok with a timeout here. The event is sometimes not sent
         * due to a firmware bug. In case of another error (like SDIO timeout)
@@ -1508,6 +1573,131 @@ out:
        return ret;
 }
 
+static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+{
+       int idx = -1;
+
+       switch (band) {
+       case IEEE80211_BAND_5GHZ:
+               if (ch >= 8 && ch <= 16)
+                       idx = ((ch-8)/4 + 18);
+               else if (ch >= 34 && ch <= 64)
+                       idx = ((ch-34)/2 + 3 + 18);
+               else if (ch >= 100 && ch <= 140)
+                       idx = ((ch-100)/4 + 15 + 18);
+               else if (ch >= 149 && ch <= 165)
+                       idx = ((ch-149)/4 + 26 + 18);
+               else
+                       idx = -1;
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (ch >= 1 && ch <= 14)
+                       idx = ch - 1;
+               else
+                       idx = -1;
+               break;
+       default:
+               wl1271_error("get reg conf ch idx - unknown band: %d",
+                            (int)band);
+       }
+
+       return idx;
+}
+
+void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
+                                    enum ieee80211_band band)
+{
+       int ch_bit_idx = 0;
+
+       if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+               return;
+
+       ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
+
+       if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+               set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
+}
+
/*
 * Push the set of usable channels to the fw (CMD_DFS_CHANNEL_CONFIG),
 * for hardware that does regulatory/DFS enforcement in firmware
 * (WLCORE_QUIRK_REGDOMAIN_CONF). The bitmap is built from the current
 * wiphy channel flags, merged with reg_ch_conf_pending, and only sent
 * when it differs from the last configuration.
 *
 * NOTE(review): the "_locked" suffix suggests the caller holds the
 * driver mutex — confirm against call sites.
 *
 * Returns 0 on success (including the nothing-changed case), negative
 * errno on failure.
 */
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
{
	struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
	int ret = 0, i, b, ch_bit_idx;
	struct ieee80211_channel *channel;
	u32 tmp_ch_bitmap[2];
	u16 ch;
	struct wiphy *wiphy = wl->hw->wiphy;
	struct ieee80211_supported_band *band;
	bool timeout = false;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return 0;

	wl1271_debug(DEBUG_CMD, "cmd reg domain config");

	memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));

	/* collect every channel that is enabled, non-radar and actively
	 * scannable in the current regulatory domain */
	for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
		band = wiphy->bands[b];
		for (i = 0; i < band->n_channels; i++) {
			channel = &band->channels[i];
			ch = channel->hw_value;

			if (channel->flags & (IEEE80211_CHAN_DISABLED |
					      IEEE80211_CHAN_RADAR |
					      IEEE80211_CHAN_PASSIVE_SCAN))
				continue;

			ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
			if (ch_bit_idx < 0)
				continue;

			set_bit(ch_bit_idx, (long *)tmp_ch_bitmap);
		}
	}

	/* merge in channels flagged by wlcore_set_pending_regdomain_ch() */
	tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0];
	tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1];

	/* nothing to do if the map is unchanged since the last config */
	if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap)))
		goto out;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		ret = -ENOMEM;
		goto out;
	}

	cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
	cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);

	wl1271_debug(DEBUG_CMD,
		     "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x",
		     cmd->ch_bit_map1, cmd->ch_bit_map2);

	ret = wl1271_cmd_send(wl, CMD_DFS_CHANNEL_CONFIG, cmd, sizeof(*cmd), 0);
	if (ret < 0) {
		wl1271_error("failed to send reg domain dfs config");
		goto out;
	}

	/* fw acknowledges the config with a DFS_CONFIG_COMPLETE event */
	ret = wl->ops->wait_for_event(wl,
				      WLCORE_EVENT_DFS_CONFIG_COMPLETE,
				      &timeout);
	if (ret < 0 || timeout) {
		wl1271_error("reg domain conf %serror",
			     timeout ? "completion " : "");
		ret = timeout ? -ETIMEDOUT : ret;
		goto out;
	}

	/* commit: remember what was sent and clear the pending set */
	memcpy(wl->reg_ch_conf_last, tmp_ch_bitmap, sizeof(tmp_ch_bitmap));
	memset(wl->reg_ch_conf_pending, 0, sizeof(wl->reg_ch_conf_pending));

out:
	kfree(cmd);
	return ret;
}
+
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
 {
        struct wl12xx_cmd_config_fwlog *cmd;
@@ -1593,12 +1783,12 @@ out:
 }
 
 static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                         u8 role_id)
+                         u8 role_id, enum ieee80211_band band, u8 channel)
 {
        struct wl12xx_cmd_roc *cmd;
        int ret = 0;
 
-       wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
+       wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", channel, role_id);
 
        if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
                return -EINVAL;
@@ -1610,8 +1800,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        }
 
        cmd->role_id = role_id;
-       cmd->channel = wlvif->channel;
-       switch (wlvif->band) {
+       cmd->channel = channel;
+       switch (band) {
        case IEEE80211_BAND_2GHZ:
                cmd->band = WLCORE_BAND_2_4GHZ;
                break;
@@ -1666,30 +1856,18 @@ out:
        return ret;
 }
 
-int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
+              enum ieee80211_band band, u8 channel)
 {
        int ret = 0;
-       bool is_first_roc;
 
        if (WARN_ON(test_bit(role_id, wl->roc_map)))
                return 0;
 
-       is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
-                       WL12XX_MAX_ROLES);
-
-       ret = wl12xx_cmd_roc(wl, wlvif, role_id);
+       ret = wl12xx_cmd_roc(wl, wlvif, role_id, band, channel);
        if (ret < 0)
                goto out;
 
-       if (is_first_roc) {
-               ret = wl1271_cmd_wait_for_event(wl,
-                                          REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
-               if (ret < 0) {
-                       wl1271_error("cmd roc event completion error");
-                       goto out;
-               }
-       }
-
        __set_bit(role_id, wl->roc_map);
 out:
        return ret;
@@ -1719,43 +1897,7 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_channel_switch(struct wl1271 *wl,
-                             struct wl12xx_vif *wlvif,
-                             struct ieee80211_channel_switch *ch_switch)
-{
-       struct wl12xx_cmd_channel_switch *cmd;
-       int ret;
-
-       wl1271_debug(DEBUG_ACX, "cmd channel switch");
-
-       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-       if (!cmd) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       cmd->role_id = wlvif->role_id;
-       cmd->channel = ch_switch->channel->hw_value;
-       cmd->switch_time = ch_switch->count;
-       cmd->stop_tx = ch_switch->block_tx;
-
-       /* FIXME: control from mac80211 in the future */
-       cmd->post_switch_tx_disable = 0;  /* Enable TX on the target channel */
-
-       ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
-       if (ret < 0) {
-               wl1271_error("failed to send channel switch command");
-               goto out_free;
-       }
-
-out_free:
-       kfree(cmd);
-
-out:
-       return ret;
-}
-
-int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_stop_channel_switch *cmd;
        int ret;
@@ -1768,6 +1910,8 @@ int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
                goto out;
        }
 
+       cmd->role_id = wlvif->role_id;
+
        ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
                wl1271_error("failed to stop channel switch command");
@@ -1782,7 +1926,8 @@ out:
 }
 
 /* start dev role and roc on its channel */
-int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                    enum ieee80211_band band, int channel)
 {
        int ret;
 
@@ -1797,11 +1942,11 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        if (ret < 0)
                goto out;
 
-       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
        if (ret < 0)
                goto out_disable;
 
-       ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+       ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id, band, channel);
        if (ret < 0)
                goto out_stop;
 
index 2409f3d..fd34123 100644 (file)
@@ -31,6 +31,8 @@ struct acx_header;
 
 int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
                    size_t res_len);
+int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
+                            size_t res_len, unsigned long valid_rets);
 int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
                           u8 *role_id);
 int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
@@ -39,11 +41,14 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                    enum ieee80211_band band, int channel);
 int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
+int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
+                                 size_t len, unsigned long valid_rets);
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
 int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                       u8 ps_mode, u16 auto_ps_timeout);
@@ -75,22 +80,30 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                          u16 action, u8 id, u8 key_type,
                          u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
                          u16 tx_seq_16);
-int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             u8 hlid);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
+              enum ieee80211_band band, u8 channel);
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
 int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                        struct ieee80211_sta *sta, u8 hlid);
 int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
+                                    enum ieee80211_band band);
+int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_channel_switch(struct wl1271 *wl,
                              struct wl12xx_vif *wlvif,
                              struct ieee80211_channel_switch *ch_switch);
-int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif);
 int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                         u8 *hlid);
 void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
+int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+                                        u32 mask, bool *timeout);
 
 enum wl1271_commands {
        CMD_INTERROGATE = 1, /* use this to read information elements */
@@ -149,8 +162,11 @@ enum wl1271_commands {
        CMD_WFD_START_DISCOVERY = 45,
        CMD_WFD_STOP_DISCOVERY  = 46,
        CMD_WFD_ATTRIBUTE_CONFIG        = 47,
-       CMD_NOP                 = 48,
-       CMD_LAST_COMMAND,
+       CMD_GENERIC_CFG                 = 48,
+       CMD_NOP                         = 49,
+
+       /* start of 18xx specific commands */
+       CMD_DFS_CHANNEL_CONFIG          = 60,
 
        MAX_COMMAND_ID = 0xFFFF,
 };
@@ -167,8 +183,8 @@ enum cmd_templ {
        CMD_TEMPL_PS_POLL,
        CMD_TEMPL_KLV,
        CMD_TEMPL_DISCONNECT,
-       CMD_TEMPL_APP_PROBE_REQ_2_4,
-       CMD_TEMPL_APP_PROBE_REQ_5,
+       CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY,
+       CMD_TEMPL_APP_PROBE_REQ_5_LEGACY,
        CMD_TEMPL_BAR,           /* for firmware internal use only */
        CMD_TEMPL_CTS,           /*
                                  * For CTS-to-self (FastCTS) mechanism
@@ -179,6 +195,8 @@ enum cmd_templ {
        CMD_TEMPL_DEAUTH_AP,
        CMD_TEMPL_TEMPORARY,
        CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+       CMD_TEMPL_PROBE_REQ_2_4_PERIODIC,
+       CMD_TEMPL_PROBE_REQ_5_PERIODIC,
 
        CMD_TEMPL_MAX = 0xff
 };
@@ -220,7 +238,8 @@ enum {
        CMD_STATUS_FW_RESET             = 22, /* Driver internal use.*/
        CMD_STATUS_TEMPLATE_OOM         = 23,
        CMD_STATUS_NO_RX_BA_SESSION     = 24,
-       MAX_COMMAND_STATUS              = 0xff
+
+       MAX_COMMAND_STATUS
 };
 
 #define CMDMBOX_HEADER_LEN 4
@@ -345,7 +364,15 @@ struct wl12xx_cmd_role_start {
 
                        u8 reset_tsf;
 
-                       u8 padding_1[4];
+                       /*
+                        * ap supports wmm (note that there is additional
+                        * per-sta wmm configuration)
+                        */
+                       u8 wmm;
+
+                       u8 bcast_session_id;
+                       u8 global_session_id;
+                       u8 padding_1[1];
                } __packed ap;
        };
 } __packed;
@@ -515,7 +542,14 @@ struct wl12xx_cmd_set_peer_state {
 
        u8 hlid;
        u8 state;
-       u8 padding[2];
+
+       /*
+        * wmm is relevant for sta role only.
+        * ap role configures the per-sta wmm params in
+        * the add_peer command.
+        */
+       u8 wmm;
+       u8 padding[1];
 } __packed;
 
 struct wl12xx_cmd_roc {
@@ -558,7 +592,7 @@ struct wl12xx_cmd_add_peer {
        u8 bss_index;
        u8 sp_len;
        u8 wmm;
-       u8 padding1;
+       u8 session_id;
 } __packed;
 
 struct wl12xx_cmd_remove_peer {
@@ -597,6 +631,13 @@ enum wl12xx_fwlogger_output {
        WL12XX_FWLOG_OUTPUT_HOST,
 };
 
/*
 * Payload of CMD_DFS_CHANNEL_CONFIG: 64-bit map of allowed channels,
 * split into two little-endian 32-bit words (see
 * wlcore_get_reg_conf_ch_idx() for the bit layout).
 */
struct wl12xx_cmd_regdomain_dfs_config {
	struct wl1271_cmd_header header;

	__le32 ch_bit_map1;
	__le32 ch_bit_map2;
} __packed;
+
 struct wl12xx_cmd_config_fwlog {
        struct wl1271_cmd_header header;
 
@@ -626,27 +667,13 @@ struct wl12xx_cmd_stop_fwlog {
        struct wl1271_cmd_header header;
 } __packed;
 
-struct wl12xx_cmd_channel_switch {
+struct wl12xx_cmd_stop_channel_switch {
        struct wl1271_cmd_header header;
 
        u8 role_id;
-
-       /* The new serving channel */
-       u8 channel;
-       /* Relative time of the serving channel switch in TBTT units */
-       u8 switch_time;
-       /* Stop the role TX, should expect it after radar detection */
-       u8 stop_tx;
-       /* The target channel tx status 1-stopped 0-open*/
-       u8 post_switch_tx_disable;
-
        u8 padding[3];
 } __packed;
 
-struct wl12xx_cmd_stop_channel_switch {
-       struct wl1271_cmd_header header;
-} __packed;
-
 /* Used to check radio status after calibration */
 #define MAX_TLV_LENGTH         500
 #define TEST_CMD_P2G_CAL       2       /* TX BiP */
index 9e40760..2b96ff8 100644 (file)
@@ -57,20 +57,49 @@ enum {
 };
 
 enum {
-       CONF_HW_RATE_INDEX_1MBPS   = 0,
-       CONF_HW_RATE_INDEX_2MBPS   = 1,
-       CONF_HW_RATE_INDEX_5_5MBPS = 2,
-       CONF_HW_RATE_INDEX_6MBPS   = 3,
-       CONF_HW_RATE_INDEX_9MBPS   = 4,
-       CONF_HW_RATE_INDEX_11MBPS  = 5,
-       CONF_HW_RATE_INDEX_12MBPS  = 6,
-       CONF_HW_RATE_INDEX_18MBPS  = 7,
-       CONF_HW_RATE_INDEX_22MBPS  = 8,
-       CONF_HW_RATE_INDEX_24MBPS  = 9,
-       CONF_HW_RATE_INDEX_36MBPS  = 10,
-       CONF_HW_RATE_INDEX_48MBPS  = 11,
-       CONF_HW_RATE_INDEX_54MBPS  = 12,
-       CONF_HW_RATE_INDEX_MAX     = CONF_HW_RATE_INDEX_54MBPS,
+       CONF_HW_RATE_INDEX_1MBPS      = 0,
+       CONF_HW_RATE_INDEX_2MBPS      = 1,
+       CONF_HW_RATE_INDEX_5_5MBPS    = 2,
+       CONF_HW_RATE_INDEX_11MBPS     = 3,
+       CONF_HW_RATE_INDEX_6MBPS      = 4,
+       CONF_HW_RATE_INDEX_9MBPS      = 5,
+       CONF_HW_RATE_INDEX_12MBPS     = 6,
+       CONF_HW_RATE_INDEX_18MBPS     = 7,
+       CONF_HW_RATE_INDEX_24MBPS     = 8,
+       CONF_HW_RATE_INDEX_36MBPS     = 9,
+       CONF_HW_RATE_INDEX_48MBPS     = 10,
+       CONF_HW_RATE_INDEX_54MBPS     = 11,
+       CONF_HW_RATE_INDEX_MCS0       = 12,
+       CONF_HW_RATE_INDEX_MCS1       = 13,
+       CONF_HW_RATE_INDEX_MCS2       = 14,
+       CONF_HW_RATE_INDEX_MCS3       = 15,
+       CONF_HW_RATE_INDEX_MCS4       = 16,
+       CONF_HW_RATE_INDEX_MCS5       = 17,
+       CONF_HW_RATE_INDEX_MCS6       = 18,
+       CONF_HW_RATE_INDEX_MCS7       = 19,
+       CONF_HW_RATE_INDEX_MCS7_SGI   = 20,
+       CONF_HW_RATE_INDEX_MCS0_40MHZ = 21,
+       CONF_HW_RATE_INDEX_MCS1_40MHZ = 22,
+       CONF_HW_RATE_INDEX_MCS2_40MHZ = 23,
+       CONF_HW_RATE_INDEX_MCS3_40MHZ = 24,
+       CONF_HW_RATE_INDEX_MCS4_40MHZ = 25,
+       CONF_HW_RATE_INDEX_MCS5_40MHZ = 26,
+       CONF_HW_RATE_INDEX_MCS6_40MHZ = 27,
+       CONF_HW_RATE_INDEX_MCS7_40MHZ = 28,
+       CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI = 29,
+
+       /* MCS8+ rates overlap with 40Mhz rates */
+       CONF_HW_RATE_INDEX_MCS8       = 21,
+       CONF_HW_RATE_INDEX_MCS9       = 22,
+       CONF_HW_RATE_INDEX_MCS10      = 23,
+       CONF_HW_RATE_INDEX_MCS11      = 24,
+       CONF_HW_RATE_INDEX_MCS12      = 25,
+       CONF_HW_RATE_INDEX_MCS13      = 26,
+       CONF_HW_RATE_INDEX_MCS14      = 27,
+       CONF_HW_RATE_INDEX_MCS15      = 28,
+       CONF_HW_RATE_INDEX_MCS15_SGI  = 29,
+
+       CONF_HW_RATE_INDEX_MAX        = CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI,
 };
 
 #define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff
@@ -415,11 +444,11 @@ struct conf_rx_settings {
 #define CONF_TX_RATE_MASK_BASIC_P2P    CONF_HW_BIT_RATE_6MBPS
 
 /*
- * Rates supported for data packets when operating as AP. Note the absence
+ * Rates supported for data packets when operating as STA/AP. Note the absence
  * of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
  * one. The rate dropped is not mandatory under any operating mode.
  */
-#define CONF_TX_AP_ENABLED_RATES       (CONF_HW_BIT_RATE_1MBPS | \
+#define CONF_TX_ENABLED_RATES       (CONF_HW_BIT_RATE_1MBPS |    \
        CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS |      \
        CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS |        \
        CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS |      \
@@ -677,6 +706,18 @@ struct conf_tx_settings {
 
        /* Time in ms for Tx watchdog timer to expire */
        u32 tx_watchdog_timeout;
+
+       /*
+        * when a slow link has this much packets pending, it becomes a low
+        * priority link, scheduling-wise
+        */
+       u8 slow_link_thold;
+
+       /*
+        * when a fast link has this much packets pending, it becomes a low
+        * priority link, scheduling-wise
+        */
+       u8 fast_link_thold;
 } __packed;
 
 enum {
@@ -1047,6 +1088,7 @@ struct conf_roam_trigger_settings {
 struct conf_scan_settings {
        /*
         * The minimum time to wait on each channel for active scans
+        * This value will be used whenever there's a connected interface.
         *
         * Range: u32 tu/1000
         */
@@ -1054,24 +1096,37 @@ struct conf_scan_settings {
 
        /*
         * The maximum time to wait on each channel for active scans
+        * This value will be currently used whenever there's a
+        * connected interface. It shouldn't exceed 30000 (~30ms) to avoid
+        * possible interference of voip traffic going on while scanning.
         *
         * Range: u32 tu/1000
         */
        u32 max_dwell_time_active;
 
-       /*
-        * The minimum time to wait on each channel for passive scans
+       /* The minimum time to wait on each channel for active scans
+        * when it's possible to have longer scan dwell times.
+        * Currently this is used whenever we're idle on all interfaces.
+        * Longer dwell times improve detection of networks within a
+        * single scan.
         *
         * Range: u32 tu/1000
         */
-       u32 min_dwell_time_passive;
+       u32 min_dwell_time_active_long;
 
-       /*
-        * The maximum time to wait on each channel for passive scans
+       /* The maximum time to wait on each channel for active scans
+        * when it's possible to have longer scan dwell times.
+        * See min_dwell_time_active_long
         *
         * Range: u32 tu/1000
         */
-       u32 max_dwell_time_passive;
+       u32 max_dwell_time_active_long;
+
+       /* time to wait on the channel for passive scans (in TU/1000) */
+       u32 dwell_time_passive;
+
+       /* time to wait on the channel for DFS scans (in TU/1000) */
+       u32 dwell_time_dfs;
 
        /*
         * Number of probe requests to transmit on each active scan channel
@@ -1276,12 +1331,20 @@ struct conf_hangover_settings {
        u8 window_size;
 } __packed;
 
+struct conf_recovery_settings {
+       /* BUG() on fw recovery */
+       u8 bug_on_recovery;
+
+       /* Prevent HW recovery. FW will remain stuck. */
+       u8 no_recovery;
+} __packed;
+
 /*
  * The conf version consists of 4 bytes.  The two MSB are the wlcore
  * version, the two LSB are the lower driver's private conf
  * version.
  */
-#define WLCORE_CONF_VERSION    (0x0002 << 16)
+#define WLCORE_CONF_VERSION    (0x0005 << 16)
 #define WLCORE_CONF_MASK       0xffff0000
 #define WLCORE_CONF_SIZE       (sizeof(struct wlcore_conf_header) +    \
                                 sizeof(struct wlcore_conf))
@@ -1309,6 +1372,7 @@ struct wlcore_conf {
        struct conf_fwlog fwlog;
        struct conf_rate_policy_settings rate;
        struct conf_hangover_settings hangover;
+       struct conf_recovery_settings recovery;
 } __packed;
 
 struct wlcore_conf_file {
index c86bb00..e70a7c8 100644 (file)
@@ -490,7 +490,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
        DRIVER_STATE_PRINT_HEX(chip.id);
        DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
        DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
-       DRIVER_STATE_PRINT_INT(sched_scanning);
+       DRIVER_STATE_PRINT_INT(recovery_count);
 
 #undef DRIVER_STATE_PRINT_INT
 #undef DRIVER_STATE_PRINT_LONG
@@ -560,7 +560,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
                if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
                    wlvif->bss_type == BSS_TYPE_IBSS) {
                        VIF_STATE_PRINT_INT(sta.hlid);
-                       VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
                        VIF_STATE_PRINT_INT(sta.basic_rate_idx);
                        VIF_STATE_PRINT_INT(sta.ap_rate_idx);
                        VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
@@ -577,6 +576,10 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
                        VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
                }
                VIF_STATE_PRINT_INT(last_tx_hlid);
+               VIF_STATE_PRINT_INT(tx_queue_count[0]);
+               VIF_STATE_PRINT_INT(tx_queue_count[1]);
+               VIF_STATE_PRINT_INT(tx_queue_count[2]);
+               VIF_STATE_PRINT_INT(tx_queue_count[3]);
                VIF_STATE_PRINT_LHEX(links_map[0]);
                VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
                VIF_STATE_PRINT_INT(band);
@@ -589,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
                VIF_STATE_PRINT_INT(beacon_int);
                VIF_STATE_PRINT_INT(default_key);
                VIF_STATE_PRINT_INT(aid);
-               VIF_STATE_PRINT_INT(session_counter);
                VIF_STATE_PRINT_INT(psm_entry_retry);
                VIF_STATE_PRINT_INT(power_level);
                VIF_STATE_PRINT_INT(rssi_thold);
@@ -993,7 +995,7 @@ static ssize_t sleep_auth_write(struct file *file,
                return -EINVAL;
        }
 
-       if (value < 0 || value > WL1271_PSM_MAX) {
+       if (value > WL1271_PSM_MAX) {
                wl1271_warning("sleep_auth must be between 0 and %d",
                               WL1271_PSM_MAX);
                return -ERANGE;
index 4890705..70f289a 100644 (file)
 #include "scan.h"
 #include "wl12xx_80211.h"
 
-static void wl1271_event_rssi_trigger(struct wl1271 *wl,
-                                     struct wl12xx_vif *wlvif,
-                                     struct event_mailbox *mbox)
+void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
 {
-       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_vif *vif;
        enum nl80211_cqm_rssi_threshold_event event;
-       s8 metric = mbox->rssi_snr_trigger_metric[0];
+       s8 metric = metric_arr[0];
 
        wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
 
-       if (metric <= wlvif->rssi_thold)
-               event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
-       else
-               event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
-
-       if (event != wlvif->last_rssi_event)
-               ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
-       wlvif->last_rssi_event = event;
+       /* TODO: check actual multi-role support */
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               if (metric <= wlvif->rssi_thold)
+                       event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+               else
+                       event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+
+               vif = wl12xx_wlvif_to_vif(wlvif);
+               if (event != wlvif->last_rssi_event)
+                       ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+               wlvif->last_rssi_event = event;
+       }
 }
+EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
 
 static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
        if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
-               if (!wlvif->sta.ba_rx_bitmap)
+               u8 hlid = wlvif->sta.hlid;
+               if (!wl->links[hlid].ba_bitmap)
                        return;
-               ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+               ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
                                             vif->bss_conf.bssid);
        } else {
                u8 hlid;
@@ -74,8 +79,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        }
 }
 
-static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
-                                              u8 enable)
+void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
 {
        struct wl12xx_vif *wlvif;
 
@@ -87,201 +91,169 @@ static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
                        wl1271_recalc_rx_streaming(wl, wlvif);
                }
        }
-
 }
+EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
 
-static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
+void wlcore_event_sched_scan_completed(struct wl1271 *wl,
+                                      u8 status)
 {
-       wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
-       wl1271_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector);
-       wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
+       wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
+                    status);
+
+       if (wl->sched_vif) {
+               ieee80211_sched_scan_stopped(wl->hw);
+               wl->sched_vif = NULL;
+       }
 }
+EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
 
-static int wl1271_event_process(struct wl1271 *wl)
+void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
+                                  unsigned long roles_bitmap,
+                                  unsigned long allowed_bitmap)
 {
-       struct event_mailbox *mbox = wl->mbox;
-       struct ieee80211_vif *vif;
        struct wl12xx_vif *wlvif;
-       u32 vector;
-       bool disconnect_sta = false;
-       unsigned long sta_bitmap = 0;
-       int ret;
-
-       wl1271_event_mbox_dump(mbox);
-
-       vector = le32_to_cpu(mbox->events_vector);
-       vector &= ~(le32_to_cpu(mbox->events_mask));
-       wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
 
-       if (vector & SCAN_COMPLETE_EVENT_ID) {
-               wl1271_debug(DEBUG_EVENT, "status: 0x%x",
-                            mbox->scheduled_scan_status);
-
-               wl1271_scan_stm(wl, wl->scan_vif);
-       }
+       wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
+                    __func__, roles_bitmap, allowed_bitmap);
 
-       if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
-               wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_REPORT_EVENT "
-                            "(status 0x%0x)", mbox->scheduled_scan_status);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+                   !test_bit(wlvif->role_id , &roles_bitmap))
+                       continue;
 
-               wl1271_scan_sched_scan_results(wl);
+               wlvif->ba_allowed = !!test_bit(wlvif->role_id,
+                                              &allowed_bitmap);
+               if (!wlvif->ba_allowed)
+                       wl1271_stop_ba_event(wl, wlvif);
        }
+}
+EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
 
-       if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) {
-               wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
-                            "(status 0x%0x)", mbox->scheduled_scan_status);
-               if (wl->sched_scanning) {
-                       ieee80211_sched_scan_stopped(wl->hw);
-                       wl->sched_scanning = false;
-               }
-       }
+void wlcore_event_channel_switch(struct wl1271 *wl,
+                                unsigned long roles_bitmap,
+                                bool success)
+{
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_vif *vif;
 
-       if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
-               wl12xx_event_soft_gemini_sense(wl,
-                                              mbox->soft_gemini_sense_info);
+       wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
+                    __func__, roles_bitmap, success);
 
-       /*
-        * We are HW_MONITOR device. On beacon loss - queue
-        * connection loss work. Cancel it on REGAINED event.
-        */
-       if (vector & BSS_LOSE_EVENT_ID) {
-               /* TODO: check for multi-role */
-               int delay = wl->conf.conn.synch_fail_thold *
-                                       wl->conf.conn.bss_lose_timeout;
-               wl1271_info("Beacon loss detected.");
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+                   !test_bit(wlvif->role_id , &roles_bitmap))
+                       continue;
 
-               /*
-                * if the work is already queued, it should take place. We
-                * don't want to delay the connection loss indication
-                * any more.
-                */
-               ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
-                                            msecs_to_jiffies(delay));
+               if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+                                       &wlvif->flags))
+                       continue;
 
-               wl12xx_for_each_wlvif_sta(wl, wlvif) {
-                       vif = wl12xx_wlvif_to_vif(wlvif);
+               vif = wl12xx_wlvif_to_vif(wlvif);
 
-                       ieee80211_cqm_rssi_notify(
-                                       vif,
-                                       NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
-                                       GFP_KERNEL);
-               }
+               ieee80211_chswitch_done(vif, success);
+               cancel_delayed_work(&wlvif->channel_switch_work);
        }
+}
+EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
 
-       if (vector & REGAINED_BSS_EVENT_ID) {
-               /* TODO: check for multi-role */
-               wl1271_info("Beacon regained.");
-               cancel_delayed_work(&wl->connection_loss_work);
-
-               /* sanity check - we can't lose and gain the beacon together */
-               WARN(vector & BSS_LOSE_EVENT_ID,
-                    "Concurrent beacon loss and gain from FW");
-       }
+void wlcore_event_dummy_packet(struct wl1271 *wl)
+{
+       wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
+       wl1271_tx_dummy_packet(wl);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
 
-       if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
-               /* TODO: check actual multi-role support */
-               wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
-               wl12xx_for_each_wlvif_sta(wl, wlvif) {
-                       wl1271_event_rssi_trigger(wl, wlvif, mbox);
+static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+       u32 num_packets = wl->conf.tx.max_tx_retries;
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *sta;
+       const u8 *addr;
+       int h;
+
+       for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+               bool found = false;
+               /* find the ap vif connected to this sta */
+               wl12xx_for_each_wlvif_ap(wl, wlvif) {
+                       if (!test_bit(h, wlvif->ap.sta_hlid_map))
+                               continue;
+                       found = true;
+                       break;
                }
-       }
+               if (!found)
+                       continue;
 
-       if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
-               u8 role_id = mbox->role_id;
-               wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
-                            "ba_allowed = 0x%x, role_id=%d",
-                            mbox->rx_ba_allowed, role_id);
+               vif = wl12xx_wlvif_to_vif(wlvif);
+               addr = wl->links[h].addr;
 
-               wl12xx_for_each_wlvif(wl, wlvif) {
-                       if (role_id != 0xff && role_id != wlvif->role_id)
-                               continue;
-
-                       wlvif->ba_allowed = !!mbox->rx_ba_allowed;
-                       if (!wlvif->ba_allowed)
-                               wl1271_stop_ba_event(wl, wlvif);
+               rcu_read_lock();
+               sta = ieee80211_find_sta(vif, addr);
+               if (sta) {
+                       wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
+                       ieee80211_report_low_ack(sta, num_packets);
                }
+               rcu_read_unlock();
        }
+}
 
-       if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
-               wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
-                                         "status = 0x%x",
-                                         mbox->channel_switch_status);
-               /*
-                * That event uses for two cases:
-                * 1) channel switch complete with status=0
-                * 2) channel switch failed status=1
-                */
-
-               /* TODO: configure only the relevant vif */
-               wl12xx_for_each_wlvif_sta(wl, wlvif) {
-                       bool success;
-
-                       if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
-                                               &wlvif->flags))
-                               continue;
-
-                       success = mbox->channel_switch_status ? false : true;
-                       vif = wl12xx_wlvif_to_vif(wlvif);
+void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+       wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
+       wlcore_disconnect_sta(wl, sta_bitmap);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
 
-                       ieee80211_chswitch_done(vif, success);
-               }
-       }
+void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+       wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
+       wlcore_disconnect_sta(wl, sta_bitmap);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
 
-       if ((vector & DUMMY_PACKET_EVENT_ID)) {
-               wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
-               ret = wl1271_tx_dummy_packet(wl);
-               if (ret < 0)
-                       return ret;
-       }
+void wlcore_event_roc_complete(struct wl1271 *wl)
+{
+       wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
+       if (wl->roc_vif)
+               ieee80211_ready_on_channel(wl->hw);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
 
+void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
+{
        /*
-        * "TX retries exceeded" has a different meaning according to mode.
-        * In AP mode the offending station is disconnected.
+        * We are HW_MONITOR device. On beacon loss - queue
+        * connection loss work. Cancel it on REGAINED event.
         */
-       if (vector & MAX_TX_RETRY_EVENT_ID) {
-               wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
-               sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
-               disconnect_sta = true;
-       }
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_vif *vif;
+       int delay = wl->conf.conn.synch_fail_thold *
+                               wl->conf.conn.bss_lose_timeout;
 
-       if (vector & INACTIVE_STA_EVENT_ID) {
-               wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
-               sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
-               disconnect_sta = true;
-       }
+       wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);
 
-       if (disconnect_sta) {
-               u32 num_packets = wl->conf.tx.max_tx_retries;
-               struct ieee80211_sta *sta;
-               const u8 *addr;
-               int h;
-
-               for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
-                       bool found = false;
-                       /* find the ap vif connected to this sta */
-                       wl12xx_for_each_wlvif_ap(wl, wlvif) {
-                               if (!test_bit(h, wlvif->ap.sta_hlid_map))
-                                       continue;
-                               found = true;
-                               break;
-                       }
-                       if (!found)
-                               continue;
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+                   !test_bit(wlvif->role_id , &roles_bitmap))
+                       continue;
 
-                       vif = wl12xx_wlvif_to_vif(wlvif);
-                       addr = wl->links[h].addr;
+               /*
+                * if the work is already queued, it should take place.
+                * We don't want to delay the connection loss
+                * indication any more.
+                */
+               ieee80211_queue_delayed_work(wl->hw,
+                                            &wlvif->connection_loss_work,
+                                            msecs_to_jiffies(delay));
 
-                       rcu_read_lock();
-                       sta = ieee80211_find_sta(vif, addr);
-                       if (sta) {
-                               wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
-                               ieee80211_report_low_ack(sta, num_packets);
-                       }
-                       rcu_read_unlock();
-               }
+               vif = wl12xx_wlvif_to_vif(wlvif);
+               ieee80211_cqm_rssi_notify(
+                               vif,
+                               NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
+                               GFP_KERNEL);
        }
-       return 0;
 }
+EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
 
 int wl1271_event_unmask(struct wl1271 *wl)
 {
@@ -305,12 +277,12 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
 
        /* first we read the mbox descriptor */
        ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
-                         sizeof(*wl->mbox), false);
+                         wl->mbox_size, false);
        if (ret < 0)
                return ret;
 
        /* process the descriptor */
-       ret = wl1271_event_process(wl);
+       ret = wl->ops->process_mailbox_events(wl);
        if (ret < 0)
                return ret;
 
index 8adf18d..acc7a59 100644 (file)
@@ -46,33 +46,17 @@ enum {
        RSSI_SNR_TRIGGER_5_EVENT_ID              = BIT(5),
        RSSI_SNR_TRIGGER_6_EVENT_ID              = BIT(6),
        RSSI_SNR_TRIGGER_7_EVENT_ID              = BIT(7),
-       MEASUREMENT_START_EVENT_ID               = BIT(8),
-       MEASUREMENT_COMPLETE_EVENT_ID            = BIT(9),
-       SCAN_COMPLETE_EVENT_ID                   = BIT(10),
-       WFD_DISCOVERY_COMPLETE_EVENT_ID          = BIT(11),
-       AP_DISCOVERY_COMPLETE_EVENT_ID           = BIT(12),
-       RESERVED1                                = BIT(13),
-       PSPOLL_DELIVERY_FAILURE_EVENT_ID         = BIT(14),
-       ROLE_STOP_COMPLETE_EVENT_ID              = BIT(15),
-       RADAR_DETECTED_EVENT_ID                  = BIT(16),
-       CHANNEL_SWITCH_COMPLETE_EVENT_ID         = BIT(17),
-       BSS_LOSE_EVENT_ID                        = BIT(18),
-       REGAINED_BSS_EVENT_ID                    = BIT(19),
-       MAX_TX_RETRY_EVENT_ID                    = BIT(20),
-       DUMMY_PACKET_EVENT_ID                    = BIT(21),
-       SOFT_GEMINI_SENSE_EVENT_ID               = BIT(22),
-       CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID        = BIT(23),
-       SOFT_GEMINI_AVALANCHE_EVENT_ID           = BIT(24),
-       PLT_RX_CALIBRATION_COMPLETE_EVENT_ID     = BIT(25),
-       INACTIVE_STA_EVENT_ID                    = BIT(26),
-       PEER_REMOVE_COMPLETE_EVENT_ID            = BIT(27),
-       PERIODIC_SCAN_COMPLETE_EVENT_ID          = BIT(28),
-       PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(29),
-       BA_SESSION_RX_CONSTRAINT_EVENT_ID        = BIT(30),
-       REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(31),
+
        EVENT_MBOX_ALL_EVENT_ID                  = 0x7fffffff,
 };
 
+/* events the driver might want to wait for */
+enum wlcore_wait_event {
+       WLCORE_EVENT_ROLE_STOP_COMPLETE,
+       WLCORE_EVENT_PEER_REMOVE_COMPLETE,
+       WLCORE_EVENT_DFS_CONFIG_COMPLETE
+};
+
 enum {
        EVENT_ENTER_POWER_SAVE_FAIL = 0,
        EVENT_ENTER_POWER_SAVE_SUCCESS,
@@ -80,61 +64,24 @@ enum {
 
 #define NUM_OF_RSSI_SNR_TRIGGERS 8
 
-struct event_mailbox {
-       __le32 events_vector;
-       __le32 events_mask;
-       __le32 reserved_1;
-       __le32 reserved_2;
-
-       u8 number_of_scan_results;
-       u8 scan_tag;
-       u8 completed_scan_status;
-       u8 reserved_3;
-
-       u8 soft_gemini_sense_info;
-       u8 soft_gemini_protective_info;
-       s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
-       u8 change_auto_mode_timeout;
-       u8 scheduled_scan_status;
-       u8 reserved4;
-       /* tuned channel (roc) */
-       u8 roc_channel;
-
-       __le16 hlid_removed_bitmap;
-
-       /* bitmap of aged stations (by HLID) */
-       __le16 sta_aging_status;
-
-       /* bitmap of stations (by HLID) which exceeded max tx retries */
-       __le16 sta_tx_retry_exceeded;
-
-       /* discovery completed results */
-       u8 discovery_tag;
-       u8 number_of_preq_results;
-       u8 number_of_prsp_results;
-       u8 reserved_5;
-
-       /* rx ba constraint */
-       u8 role_id; /* 0xFF means any role. */
-       u8 rx_ba_allowed;
-       u8 reserved_6[2];
-
-       /* Channel switch results */
-
-       u8 channel_switch_role_id;
-       u8 channel_switch_status;
-       u8 reserved_7[2];
-
-       u8 ps_poll_delivery_failure_role_ids;
-       u8 stopped_role_ids;
-       u8 started_role_ids;
-
-       u8 reserved_8[9];
-} __packed;
-
 struct wl1271;
 
 int wl1271_event_unmask(struct wl1271 *wl);
 int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
 
+void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable);
+void wlcore_event_sched_scan_completed(struct wl1271 *wl,
+                                      u8 status);
+void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
+                                  unsigned long roles_bitmap,
+                                  unsigned long allowed_bitmap);
+void wlcore_event_channel_switch(struct wl1271 *wl,
+                                unsigned long roles_bitmap,
+                                bool success);
+void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap);
+void wlcore_event_dummy_packet(struct wl1271 *wl);
+void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap);
+void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap);
+void wlcore_event_roc_complete(struct wl1271 *wl);
+void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr);
 #endif
index 2673d78..7fd260c 100644 (file)
@@ -201,4 +201,45 @@ wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
        return buf_offset;
 }
 
+static inline void
+wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       struct ieee80211_sta *sta, u32 changed)
+{
+       if (wl->ops->sta_rc_update)
+               wl->ops->sta_rc_update(wl, wlvif, sta, changed);
+}
+
+static inline int
+wlcore_hw_set_peer_cap(struct wl1271 *wl,
+                      struct ieee80211_sta_ht_cap *ht_cap,
+                      bool allow_ht_operation,
+                      u32 rate_set, u8 hlid)
+{
+       if (wl->ops->set_peer_cap)
+               return wl->ops->set_peer_cap(wl, ht_cap, allow_ht_operation,
+                                            rate_set, hlid);
+
+       return 0;
+}
+
+static inline bool
+wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+                       struct wl1271_link *lnk)
+{
+       if (!wl->ops->lnk_high_prio)
+               BUG_ON(1);
+
+       return wl->ops->lnk_high_prio(wl, hlid, lnk);
+}
+
+static inline bool
+wlcore_hw_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+                      struct wl1271_link *lnk)
+{
+       if (!wl->ops->lnk_low_prio)
+               BUG_ON(1);
+
+       return wl->ops->lnk_low_prio(wl, hlid, lnk);
+}
+
 #endif
index 32d157f..5c6f11e 100644 (file)
@@ -41,14 +41,14 @@ int wl1271_init_templates_config(struct wl1271 *wl)
 
        /* send empty templates for fw memory reservation */
        ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
-                                     CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
+                                     wl->scan_templ_id_2_4, NULL,
                                      WL1271_CMD_TEMPL_MAX_SIZE,
                                      0, WL1271_RATE_AUTOMATIC);
        if (ret < 0)
                return ret;
 
        ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
-                                     CMD_TEMPL_CFG_PROBE_REQ_5,
+                                     wl->scan_templ_id_5,
                                      NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
                                      WL1271_RATE_AUTOMATIC);
        if (ret < 0)
@@ -56,14 +56,16 @@ int wl1271_init_templates_config(struct wl1271 *wl)
 
        if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
                ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
-                                             CMD_TEMPL_APP_PROBE_REQ_2_4, NULL,
+                                             wl->sched_scan_templ_id_2_4,
+                                             NULL,
                                              WL1271_CMD_TEMPL_MAX_SIZE,
                                              0, WL1271_RATE_AUTOMATIC);
                if (ret < 0)
                        return ret;
 
                ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
-                                             CMD_TEMPL_APP_PROBE_REQ_5, NULL,
+                                             wl->sched_scan_templ_id_5,
+                                             NULL,
                                              WL1271_CMD_TEMPL_MAX_SIZE,
                                              0, WL1271_RATE_AUTOMATIC);
                if (ret < 0)
@@ -463,7 +465,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
                supported_rates = CONF_TX_OFDM_RATES;
        else
-               supported_rates = CONF_TX_AP_ENABLED_RATES;
+               supported_rates = CONF_TX_ENABLED_RATES;
 
        /* unconditionally enable HT rates */
        supported_rates |= CONF_TX_MCS_RATES;
@@ -575,9 +577,6 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
                /* Configure for power according to debugfs */
                if (sta_auth != WL1271_PSM_ILLEGAL)
                        ret = wl1271_acx_sleep_auth(wl, sta_auth);
-               /* Configure for power always on */
-               else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
-                       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
                /* Configure for ELP power saving */
                else
                        ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
@@ -679,6 +678,10 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
+       ret = wlcore_cmd_regdomain_config_locked(wl);
+       if (ret < 0)
+               return ret;
+
        /* Bluetooth WLAN coexistence */
        ret = wl1271_init_pta(wl);
        if (ret < 0)
index f48530f..af7d9f9 100644 (file)
@@ -105,13 +105,13 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
 {
        int ret;
 
-       ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
-                             sizeof(wl->buffer_32), false);
+       ret = wlcore_raw_read(wl, addr, wl->buffer_32,
+                             sizeof(*wl->buffer_32), false);
        if (ret < 0)
                return ret;
 
        if (val)
-               *val = le32_to_cpu(wl->buffer_32);
+               *val = le32_to_cpu(*wl->buffer_32);
 
        return 0;
 }
@@ -119,9 +119,9 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
 static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
                                                  u32 val)
 {
-       wl->buffer_32 = cpu_to_le32(val);
-       return wlcore_raw_write(wl, addr, &wl->buffer_32,
-                               sizeof(wl->buffer_32), false);
+       *wl->buffer_32 = cpu_to_le32(val);
+       return wlcore_raw_write(wl, addr, wl->buffer_32,
+                               sizeof(*wl->buffer_32), false);
 }
 
 static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
index ce6e62a..2c2ff3e 100644 (file)
@@ -56,8 +56,8 @@
 #define WL1271_BOOT_RETRIES 3
 
 static char *fwlog_param;
-static bool bug_on_recovery;
-static bool no_recovery;
+static int bug_on_recovery = -1;
+static int no_recovery     = -1;
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
                                         struct ieee80211_vif *vif,
@@ -79,12 +79,10 @@ static int wl12xx_set_authorized(struct wl1271 *wl,
        if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
                return 0;
 
-       ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
+       ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
        if (ret < 0)
                return ret;
 
-       wl12xx_croc(wl, wlvif->role_id);
-
        wl1271_info("Association completed.");
        return 0;
 }
@@ -95,6 +93,8 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
        struct ieee80211_supported_band *band;
        struct ieee80211_channel *ch;
        int i;
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct wl1271 *wl = hw->priv;
 
        band = wiphy->bands[IEEE80211_BAND_5GHZ];
        for (i = 0; i < band->n_channels; i++) {
@@ -107,6 +107,9 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
                                     IEEE80211_CHAN_PASSIVE_SCAN;
 
        }
+
+       if (likely(wl->state == WLCORE_STATE_ON))
+               wlcore_regdomain_config(wl);
 }
 
 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
@@ -301,6 +304,7 @@ out:
 static void wlcore_adjust_conf(struct wl1271 *wl)
 {
        /* Adjust settings according to optional module parameters */
+
        if (fwlog_param) {
                if (!strcmp(fwlog_param, "continuous")) {
                        wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
@@ -316,16 +320,22 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
                        wl1271_error("Unknown fwlog parameter %s", fwlog_param);
                }
        }
+
+       if (bug_on_recovery != -1)
+               wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
+
+       if (no_recovery != -1)
+               wl->conf.recovery.no_recovery = (u8) no_recovery;
 }
 
 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
                                        struct wl12xx_vif *wlvif,
                                        u8 hlid, u8 tx_pkts)
 {
-       bool fw_ps, single_sta;
+       bool fw_ps, single_link;
 
        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
-       single_sta = (wl->active_sta_count == 1);
+       single_link = (wl->active_link_count == 1);
 
        /*
         * Wake up from high level PS if the STA is asleep with too little
@@ -336,10 +346,10 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
 
        /*
         * Start high-level PS if the STA is asleep with enough blocks in FW.
-        * Make an exception if this is the only connected station. In this
-        * case FW-memory congestion is not a problem.
+        * Make an exception if this is the only connected link. In this
+        * case FW-memory congestion is less of a problem.
         */
-       else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+       else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
@@ -347,11 +357,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                                           struct wl12xx_vif *wlvif,
                                           struct wl_fw_status_2 *status)
 {
-       struct wl1271_link *lnk;
        u32 cur_fw_ps_map;
-       u8 hlid, cnt;
-
-       /* TODO: also use link_fast_bitmap here */
+       u8 hlid;
 
        cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
        if (wl->ap_fw_ps_map != cur_fw_ps_map) {
@@ -363,17 +370,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                wl->ap_fw_ps_map = cur_fw_ps_map;
        }
 
-       for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
-               lnk = &wl->links[hlid];
-               cnt = status->counters.tx_lnk_free_pkts[hlid] -
-                       lnk->prev_freed_pkts;
-
-               lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
-               lnk->allocated_pkts -= cnt;
-
+       for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
                wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
-                                           lnk->allocated_pkts);
-       }
+                                           wl->links[hlid].allocated_pkts);
 }
 
 static int wlcore_fw_status(struct wl1271 *wl,
@@ -387,6 +386,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
        int i;
        size_t status_len;
        int ret;
+       struct wl1271_link *lnk;
 
        status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
                sizeof(*status_2) + wl->fw_status_priv_len;
@@ -412,6 +412,17 @@ static int wlcore_fw_status(struct wl1271 *wl,
                wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
        }
 
+
+       for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+               lnk = &wl->links[i];
+               /* prevent wrap-around in freed-packets counter */
+               lnk->allocated_pkts -=
+                       (status_2->counters.tx_lnk_free_pkts[i] -
+                        lnk->prev_freed_pkts) & 0xff;
+
+               lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+       }
+
        /* prevent wrap-around in total blocks counter */
        if (likely(wl->tx_blocks_freed <=
                   le32_to_cpu(status_2->total_released_blks)))
@@ -464,6 +475,8 @@ static int wlcore_fw_status(struct wl1271 *wl,
        wl->time_offset = (timespec_to_ns(&ts) >> 10) -
                (s64)le32_to_cpu(status_2->fw_localtime);
 
+       wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+
        return 0;
 }
 
@@ -800,11 +813,13 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
 
        /*
         * Make sure the chip is awake and the logger isn't active.
-        * Do not send a stop fwlog command if the fw is hanged.
+        * Do not send a stop fwlog command if the fw is hanged or if
+        * dbgpins are used (due to some fw bug).
         */
        if (wl1271_ps_elp_wakeup(wl))
                goto out;
-       if (!wl->watchdog_recovery)
+       if (!wl->watchdog_recovery &&
+           wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
                wl12xx_cmd_stop_fwlog(wl);
 
        /* Read the first memory block address */
@@ -872,7 +887,8 @@ static void wlcore_print_recovery(struct wl1271 *wl)
        if (ret < 0)
                return;
 
-       wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
+       wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
+                               pc, hint_sts, ++wl->recovery_count);
 
        wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
 }
@@ -895,10 +911,10 @@ static void wl1271_recovery_work(struct work_struct *work)
                wlcore_print_recovery(wl);
        }
 
-       BUG_ON(bug_on_recovery &&
+       BUG_ON(wl->conf.recovery.bug_on_recovery &&
               !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 
-       if (no_recovery) {
+       if (wl->conf.recovery.no_recovery) {
                wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
                goto out_unlock;
        }
@@ -918,11 +934,6 @@ static void wl1271_recovery_work(struct work_struct *work)
        /* Prevent spurious TX during FW restart */
        wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
 
-       if (wl->sched_scanning) {
-               ieee80211_sched_scan_stopped(wl->hw);
-               wl->sched_scanning = false;
-       }
-
        /* reboot the chipset */
        while (!list_empty(&wl->wlvif_list)) {
                wlvif = list_first_entry(&wl->wlvif_list,
@@ -1139,7 +1150,6 @@ int wl1271_plt_stop(struct wl1271 *wl)
        cancel_work_sync(&wl->recovery_work);
        cancel_delayed_work_sync(&wl->elp_work);
        cancel_delayed_work_sync(&wl->tx_watchdog_work);
-       cancel_delayed_work_sync(&wl->connection_loss_work);
 
        mutex_lock(&wl->mutex);
        wl1271_power_off(wl);
@@ -1167,9 +1177,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
        int q, mapping;
        u8 hlid;
 
-       if (vif)
-               wlvif = wl12xx_vif_to_data(vif);
+       if (!vif) {
+               wl1271_debug(DEBUG_TX, "DROP skb with no vif");
+               ieee80211_free_txskb(hw, skb);
+               return;
+       }
 
+       wlvif = wl12xx_vif_to_data(vif);
        mapping = skb_get_queue_mapping(skb);
        q = wl1271_tx_get_queue(mapping);
 
@@ -1183,9 +1197,9 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
         * allow these packets through.
         */
        if (hlid == WL12XX_INVALID_LINK_ID ||
-           (wlvif && !test_bit(hlid, wlvif->links_map)) ||
-            (wlcore_is_queue_stopped(wl, q) &&
-             !wlcore_is_queue_stopped_by_reason(wl, q,
+           (!test_bit(hlid, wlvif->links_map)) ||
+            (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
+             !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
                        WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
                wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
                ieee80211_free_txskb(hw, skb);
@@ -1197,16 +1211,17 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
        skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
 
        wl->tx_queue_count[q]++;
+       wlvif->tx_queue_count[q]++;
 
        /*
         * The workqueue is slow to process the tx_queue and we need stop
         * the queue here, otherwise the queue will get too long.
         */
-       if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
-           !wlcore_is_queue_stopped_by_reason(wl, q,
+       if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
+           !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
                                        WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
                wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
-               wlcore_stop_queue_locked(wl, q,
+               wlcore_stop_queue_locked(wl, wlvif, q,
                                         WLCORE_QUEUE_STOP_REASON_WATERMARK);
        }
 
@@ -1841,11 +1856,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        cancel_work_sync(&wl->tx_work);
        cancel_delayed_work_sync(&wl->elp_work);
        cancel_delayed_work_sync(&wl->tx_watchdog_work);
-       cancel_delayed_work_sync(&wl->connection_loss_work);
 
        /* let's notify MAC80211 about the remaining pending TX frames */
-       wl12xx_tx_reset(wl);
        mutex_lock(&wl->mutex);
+       wl12xx_tx_reset(wl);
 
        wl1271_power_off(wl);
        /*
@@ -1868,14 +1882,17 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        wl->time_offset = 0;
        wl->ap_fw_ps_map = 0;
        wl->ap_ps_map = 0;
-       wl->sched_scanning = false;
        wl->sleep_auth = WL1271_PSM_ILLEGAL;
        memset(wl->roles_map, 0, sizeof(wl->roles_map));
        memset(wl->links_map, 0, sizeof(wl->links_map));
        memset(wl->roc_map, 0, sizeof(wl->roc_map));
+       memset(wl->session_ids, 0, sizeof(wl->session_ids));
        wl->active_sta_count = 0;
+       wl->active_link_count = 0;
 
        /* The system link is always allocated */
+       wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
+       wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
        __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
 
        /*
@@ -1901,6 +1918,12 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        wl->tx_res_if = NULL;
        kfree(wl->target_mem_map);
        wl->target_mem_map = NULL;
+
+       /*
+        * FW channels must be re-calibrated after recovery,
+        * clear the last Reg-Domain channel configuration.
+        */
+       memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
 }
 
 static void wlcore_op_stop(struct ieee80211_hw *hw)
@@ -1916,6 +1939,71 @@ static void wlcore_op_stop(struct ieee80211_hw *hw)
        mutex_unlock(&wl->mutex);
 }
 
+static void wlcore_channel_switch_work(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct wl1271 *wl;
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
+       int ret;
+
+       dwork = container_of(work, struct delayed_work, work);
+       wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
+       wl = wlvif->wl;
+
+       wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       /* check the channel switch is still ongoing */
+       if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
+               goto out;
+
+       vif = wl12xx_wlvif_to_vif(wlvif);
+       ieee80211_chswitch_done(vif, false);
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       wl12xx_cmd_stop_channel_switch(wl, wlvif);
+
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+}
+
+static void wlcore_connection_loss_work(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct wl1271 *wl;
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
+
+       dwork = container_of(work, struct delayed_work, work);
+       wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
+       wl = wlvif->wl;
+
+       wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       /* Call mac80211 connection loss */
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+               goto out;
+
+       vif = wl12xx_wlvif_to_vif(wlvif);
+       ieee80211_connection_loss(vif);
+out:
+       mutex_unlock(&wl->mutex);
+}
+
 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
 {
        u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2035,15 +2123,15 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
                        wl12xx_allocate_rate_policy(wl,
                                                &wlvif->ap.ucast_rate_idx[i]);
-               wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
+               wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
                /*
                 * TODO: check if basic_rate shouldn't be
                 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
                 * instead (the same thing for STA above).
                */
-               wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
+               wlvif->basic_rate = CONF_TX_ENABLED_RATES;
                /* TODO: this seems to be used only for STA, check it */
-               wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
+               wlvif->rate_set = CONF_TX_ENABLED_RATES;
        }
 
        wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
@@ -2063,6 +2151,10 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                  wl1271_rx_streaming_enable_work);
        INIT_WORK(&wlvif->rx_streaming_disable_work,
                  wl1271_rx_streaming_disable_work);
+       INIT_DELAYED_WORK(&wlvif->channel_switch_work,
+                         wlcore_channel_switch_work);
+       INIT_DELAYED_WORK(&wlvif->connection_loss_work,
+                         wlcore_connection_loss_work);
        INIT_LIST_HEAD(&wlvif->list);
 
        setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2070,7 +2162,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
        return 0;
 }
 
-static bool wl12xx_init_fw(struct wl1271 *wl)
+static int wl12xx_init_fw(struct wl1271 *wl)
 {
        int retries = WL1271_BOOT_RETRIES;
        bool booted = false;
@@ -2136,7 +2228,7 @@ power_off:
 
        wl->state = WLCORE_STATE_ON;
 out:
-       return booted;
+       return ret;
 }
 
 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
@@ -2196,6 +2288,81 @@ static void wl12xx_force_active_psm(struct wl1271 *wl)
        }
 }
 
+struct wlcore_hw_queue_iter_data {
+       unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
+       /* current vif */
+       struct ieee80211_vif *vif;
+       /* is the current vif among those iterated */
+       bool cur_running;
+};
+
+static void wlcore_hw_queue_iter(void *data, u8 *mac,
+                                struct ieee80211_vif *vif)
+{
+       struct wlcore_hw_queue_iter_data *iter_data = data;
+
+       if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+               return;
+
+       if (iter_data->cur_running || vif == iter_data->vif) {
+               iter_data->cur_running = true;
+               return;
+       }
+
+       __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
+}
+
+static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
+                                        struct wl12xx_vif *wlvif)
+{
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+       struct wlcore_hw_queue_iter_data iter_data = {};
+       int i, q_base;
+
+       iter_data.vif = vif;
+
+       /* mark all bits taken by active interfaces */
+       ieee80211_iterate_active_interfaces_atomic(wl->hw,
+                                       IEEE80211_IFACE_ITER_RESUME_ALL,
+                                       wlcore_hw_queue_iter, &iter_data);
+
+       /* the current vif is already running in mac80211 (resume/recovery) */
+       if (iter_data.cur_running) {
+               wlvif->hw_queue_base = vif->hw_queue[0];
+               wl1271_debug(DEBUG_MAC80211,
+                            "using pre-allocated hw queue base %d",
+                            wlvif->hw_queue_base);
+
+               /* interface type might have changed type */
+               goto adjust_cab_queue;
+       }
+
+       q_base = find_first_zero_bit(iter_data.hw_queue_map,
+                                    WLCORE_NUM_MAC_ADDRESSES);
+       if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
+               return -EBUSY;
+
+       wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
+       wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
+                    wlvif->hw_queue_base);
+
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
+               /* register hw queues in mac80211 */
+               vif->hw_queue[i] = wlvif->hw_queue_base + i;
+       }
+
+adjust_cab_queue:
+       /* the last places are reserved for cab queues per interface */
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
+                                wlvif->hw_queue_base / NUM_TX_QUEUES;
+       else
+               vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
+       return 0;
+}
+
 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
@@ -2204,7 +2371,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
        struct vif_counter_data vif_count;
        int ret = 0;
        u8 role_type;
-       bool booted = false;
 
        vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
                             IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2242,6 +2408,10 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
+       ret = wlcore_allocate_hw_queue_base(wl, wlvif);
+       if (ret < 0)
+               goto out;
+
        if (wl12xx_need_fw_change(wl, vif_count, true)) {
                wl12xx_force_active_psm(wl);
                set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
@@ -2261,11 +2431,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                 */
                memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
 
-               booted = wl12xx_init_fw(wl);
-               if (!booted) {
-                       ret = -EINVAL;
+               ret = wl12xx_init_fw(wl);
+               if (ret < 0)
                        goto out;
-               }
        }
 
        ret = wl12xx_cmd_role_enable(wl, vif->addr,
@@ -2312,7 +2480,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
        wl1271_info("down");
 
        if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
-           wl->scan_vif == vif) {
+           wl->scan_wlvif == wlvif) {
                /*
                 * Rearm the tx watchdog just before idling scan. This
                 * prevents just-finished scans from triggering the watchdog
@@ -2321,11 +2489,21 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
 
                wl->scan.state = WL1271_SCAN_STATE_IDLE;
                memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
-               wl->scan_vif = NULL;
+               wl->scan_wlvif = NULL;
                wl->scan.req = NULL;
                ieee80211_scan_completed(wl->hw, true);
        }
 
+       if (wl->sched_vif == wlvif) {
+               ieee80211_sched_scan_stopped(wl->hw);
+               wl->sched_vif = NULL;
+       }
+
+       if (wl->roc_vif == vif) {
+               wl->roc_vif = NULL;
+               ieee80211_remain_on_channel_expired(wl->hw);
+       }
+
        if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
                /* disable active roles */
                ret = wl1271_ps_elp_wakeup(wl);
@@ -2394,9 +2572,6 @@ deinit:
                /* Configure for power according to debugfs */
                if (sta_auth != WL1271_PSM_ILLEGAL)
                        wl1271_acx_sleep_auth(wl, sta_auth);
-               /* Configure for power always on */
-               else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
-                       wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
                /* Configure for ELP power saving */
                else
                        wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
@@ -2408,6 +2583,7 @@ unlock:
        del_timer_sync(&wlvif->rx_streaming_timer);
        cancel_work_sync(&wlvif->rx_streaming_enable_work);
        cancel_work_sync(&wlvif->rx_streaming_disable_work);
+       cancel_delayed_work_sync(&wlvif->connection_loss_work);
 
        mutex_lock(&wl->mutex);
 }
@@ -2466,8 +2642,7 @@ static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
        return ret;
 }
 
-static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                         bool set_assoc)
+static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
        bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
@@ -2487,18 +2662,111 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        /* clear encryption type */
        wlvif->encryption_type = KEY_NONE;
 
-       if (set_assoc)
-               set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
-
        if (is_ibss)
                ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
-       else
+       else {
+               if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
+                       /*
+                        * TODO: this is an ugly workaround for wl12xx fw
+                        * bug - we are not able to tx/rx after the first
+                        * start_sta, so make dummy start+stop calls,
+                        * and then call start_sta again.
+                        * this should be fixed in the fw.
+                        */
+                       wl12xx_cmd_role_start_sta(wl, wlvif);
+                       wl12xx_cmd_role_stop_sta(wl, wlvif);
+               }
+
                ret = wl12xx_cmd_role_start_sta(wl, wlvif);
+       }
+
+       return ret;
+}
+
+static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
+                           int offset)
+{
+       u8 ssid_len;
+       const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
+                                        skb->len - offset);
+
+       if (!ptr) {
+               wl1271_error("No SSID in IEs!");
+               return -ENOENT;
+       }
+
+       ssid_len = ptr[1];
+       if (ssid_len > IEEE80211_MAX_SSID_LEN) {
+               wl1271_error("SSID is too long!");
+               return -EINVAL;
+       }
+
+       wlvif->ssid_len = ssid_len;
+       memcpy(wlvif->ssid, ptr+2, ssid_len);
+       return 0;
+}
+
+static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+       struct sk_buff *skb;
+       int ieoffset;
+
+       /* we currently only support setting the ssid from the ap probe req */
+       if (wlvif->bss_type != BSS_TYPE_STA_BSS)
+               return -EINVAL;
+
+       skb = ieee80211_ap_probereq_get(wl->hw, vif);
+       if (!skb)
+               return -EINVAL;
+
+       ieoffset = offsetof(struct ieee80211_mgmt,
+                           u.probe_req.variable);
+       wl1271_ssid_set(wlvif, skb, ieoffset);
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
+static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           struct ieee80211_bss_conf *bss_conf,
+                           u32 sta_rate_set)
+{
+       int ieoffset;
+       int ret;
+
+       wlvif->aid = bss_conf->aid;
+       wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+       wlvif->beacon_int = bss_conf->beacon_int;
+       wlvif->wmm_enabled = bss_conf->qos;
+
+       set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
+
+       /*
+        * with wl1271, we don't need to update the
+        * beacon_int and dtim_period, because the firmware
+        * updates it by itself when the first beacon is
+        * received after a join.
+        */
+       ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
        if (ret < 0)
-               goto out;
+               return ret;
 
-       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
-               goto out;
+       /*
+        * Get a template for hardware connection maintenance
+        */
+       dev_kfree_skb(wlvif->probereq);
+       wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+                                                       wlvif,
+                                                       NULL);
+       ieoffset = offsetof(struct ieee80211_mgmt,
+                           u.probe_req.variable);
+       wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
+
+       /* enable the connection monitoring feature */
+       ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
+       if (ret < 0)
+               return ret;
 
        /*
         * The join command disable the keep-alive mode, shut down its process,
@@ -2508,35 +2776,83 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
         */
        ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
        if (ret < 0)
-               goto out;
+               return ret;
 
        ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
        if (ret < 0)
-               goto out;
+               return ret;
 
        ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
        if (ret < 0)
-               goto out;
+               return ret;
 
        ret = wl1271_acx_keep_alive_config(wl, wlvif,
                                           wlvif->sta.klv_template_id,
                                           ACX_KEEP_ALIVE_TPL_VALID);
        if (ret < 0)
-               goto out;
+               return ret;
+
+       /*
+        * The default fw psm configuration is AUTO, while mac80211 default
+        * setting is off (ACTIVE), so sync the fw with the correct value.
+        */
+       ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
+       if (ret < 0)
+               return ret;
+
+       if (sta_rate_set) {
+               wlvif->rate_set =
+                       wl1271_tx_enabled_rates_get(wl,
+                                                   sta_rate_set,
+                                                   wlvif->band);
+               ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+       }
 
-out:
        return ret;
 }
 
-static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
+       bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+
+       /* make sure we are connected (sta) joined */
+       if (sta &&
+           !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+               return false;
+
+       /* make sure we are joined (ibss) */
+       if (!sta &&
+           test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
+               return false;
+
+       if (sta) {
+               /* use defaults when not associated */
+               wlvif->aid = 0;
+
+               /* free probe-request template */
+               dev_kfree_skb(wlvif->probereq);
+               wlvif->probereq = NULL;
+
+               /* disable connection monitor features */
+               ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+               if (ret < 0)
+                       return ret;
+
+               /* Disable the keep-alive feature */
+               ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
+               if (ret < 0)
+                       return ret;
+       }
 
        if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
                struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
-               wl12xx_cmd_stop_channel_switch(wl);
+               wl12xx_cmd_stop_channel_switch(wl, wlvif);
                ieee80211_chswitch_done(vif, false);
+               cancel_delayed_work(&wlvif->channel_switch_work);
        }
 
        /* invalidate keep-alive template */
@@ -2544,17 +2860,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                                     wlvif->sta.klv_template_id,
                                     ACX_KEEP_ALIVE_TPL_INVALID);
 
-       /* to stop listening to a channel, we disconnect */
-       ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
-       if (ret < 0)
-               goto out;
-
        /* reset TX security counters on a clean disconnect */
        wlvif->tx_security_last_seq_lsb = 0;
        wlvif->tx_security_seq = 0;
 
-out:
-       return ret;
+       return 0;
 }
 
 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
@@ -2563,195 +2873,38 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        wlvif->rate_set = wlvif->basic_rate_set;
 }
 
-static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                 bool idle)
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            struct ieee80211_conf *conf, u32 changed)
 {
        int ret;
-       bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
-
-       if (idle == cur_idle)
-               return 0;
 
-       if (idle) {
-               /* no need to croc if we weren't busy (e.g. during boot) */
-               if (wl12xx_dev_role_started(wlvif)) {
-                       ret = wl12xx_stop_dev(wl, wlvif);
-                       if (ret < 0)
-                               goto out;
-               }
-               wlvif->rate_set =
-                       wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
-               ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+       if (conf->power_level != wlvif->power_level) {
+               ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
                if (ret < 0)
-                       goto out;
-               clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
-       } else {
-               /* The current firmware only supports sched_scan in idle */
-               if (wl->sched_scanning) {
-                       wl1271_scan_sched_scan_stop(wl, wlvif);
-                       ieee80211_sched_scan_stopped(wl->hw);
-               }
+                       return ret;
 
-               ret = wl12xx_start_dev(wl, wlvif);
-               if (ret < 0)
-                       goto out;
-               set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+               wlvif->power_level = conf->power_level;
        }
 
-out:
-       return ret;
+       return 0;
 }
 
-static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                            struct ieee80211_conf *conf, u32 changed)
-{
-       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
-       int channel, ret;
-
-       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
-
-       /* if the channel changes while joined, join again */
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
-           ((wlvif->band != conf->channel->band) ||
-            (wlvif->channel != channel) ||
-            (wlvif->channel_type != conf->channel_type))) {
-               /* send all pending packets */
-               ret = wlcore_tx_work_locked(wl);
-               if (ret < 0)
-                       return ret;
-
-               wlvif->band = conf->channel->band;
-               wlvif->channel = channel;
-               wlvif->channel_type = conf->channel_type;
-
-               if (is_ap) {
-                       wl1271_set_band_rate(wl, wlvif);
-                       ret = wl1271_init_ap_rates(wl, wlvif);
-                       if (ret < 0)
-                               wl1271_error("AP rate policy change failed %d",
-                                            ret);
-               } else {
-                       /*
-                        * FIXME: the mac80211 should really provide a fixed
-                        * rate to use here. for now, just use the smallest
-                        * possible rate for the band as a fixed rate for
-                        * association frames and other control messages.
-                        */
-                       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
-                               wl1271_set_band_rate(wl, wlvif);
-
-                       wlvif->basic_rate =
-                               wl1271_tx_min_rate_get(wl,
-                                                      wlvif->basic_rate_set);
-                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
-                       if (ret < 0)
-                               wl1271_warning("rate policy for channel "
-                                              "failed %d", ret);
-
-                       /*
-                        * change the ROC channel. do it only if we are
-                        * not idle. otherwise, CROC will be called
-                        * anyway.
-                        */
-                       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
-                                     &wlvif->flags) &&
-                           wl12xx_dev_role_started(wlvif) &&
-                           !(conf->flags & IEEE80211_CONF_IDLE)) {
-                               ret = wl12xx_stop_dev(wl, wlvif);
-                               if (ret < 0)
-                                       return ret;
-
-                               ret = wl12xx_start_dev(wl, wlvif);
-                               if (ret < 0)
-                                       return ret;
-                       }
-               }
-       }
-
-       if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
-
-               if ((conf->flags & IEEE80211_CONF_PS) &&
-                   test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
-                   !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
-
-                       int ps_mode;
-                       char *ps_mode_str;
-
-                       if (wl->conf.conn.forced_ps) {
-                               ps_mode = STATION_POWER_SAVE_MODE;
-                               ps_mode_str = "forced";
-                       } else {
-                               ps_mode = STATION_AUTO_PS_MODE;
-                               ps_mode_str = "auto";
-                       }
-
-                       wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
-
-                       ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
-
-                       if (ret < 0)
-                               wl1271_warning("enter %s ps failed %d",
-                                              ps_mode_str, ret);
-
-               } else if (!(conf->flags & IEEE80211_CONF_PS) &&
-                          test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
-
-                       wl1271_debug(DEBUG_PSM, "auto ps disabled");
-
-                       ret = wl1271_ps_set_mode(wl, wlvif,
-                                                STATION_ACTIVE_MODE);
-                       if (ret < 0)
-                               wl1271_warning("exit auto ps failed %d", ret);
-               }
-       }
-
-       if (conf->power_level != wlvif->power_level) {
-               ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
-               if (ret < 0)
-                       return ret;
-
-               wlvif->power_level = conf->power_level;
-       }
-
-       return 0;
-}
-
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif;
        struct ieee80211_conf *conf = &hw->conf;
-       int channel, ret = 0;
-
-       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+       int ret = 0;
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+       wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
                     " changed 0x%x",
-                    channel,
                     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
                     conf->power_level,
                     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
                         changed);
 
-       /*
-        * mac80211 will go to idle nearly immediately after transmitting some
-        * frames, such as the deauth. To make sure those frames reach the air,
-        * wait here until the TX queue is fully flushed.
-        */
-       if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
-           ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
-            (conf->flags & IEEE80211_CONF_IDLE)))
-               wl1271_tx_flush(wl);
-
        mutex_lock(&wl->mutex);
 
-       /* we support configuring the channel and band even while off */
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               wl->band = conf->channel->band;
-               wl->channel = channel;
-               wl->channel_type = conf->channel_type;
-       }
-
        if (changed & IEEE80211_CONF_CHANGE_POWER)
                wl->power_level = conf->power_level;
 
@@ -3071,10 +3224,7 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                 * stop the queues and flush to ensure the next packets are
                 * in sync with FW spare block accounting
                 */
-               mutex_lock(&wl->mutex);
                wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
-               mutex_unlock(&wl->mutex);
-
                wl1271_tx_flush(wl);
        }
 
@@ -3200,6 +3350,29 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
 }
 EXPORT_SYMBOL_GPL(wlcore_set_key);
 
+void wlcore_regdomain_config(struct wl1271 *wl)
+{
+       int ret;
+
+       if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+               return;
+
+       mutex_lock(&wl->mutex);
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wlcore_cmd_regdomain_config_locked(wl);
+       if (ret < 0) {
+               wl12xx_queue_recovery_work(wl);
+               goto out;
+       }
+
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+}
+
 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct cfg80211_scan_request *req)
@@ -3239,7 +3412,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                goto out_sleep;
        }
 
-       ret = wl1271_scan(hw->priv, vif, ssid, len, req);
+       ret = wlcore_scan(hw->priv, vif, ssid, len, req);
 out_sleep:
        wl1271_ps_elp_sleep(wl);
 out:
@@ -3252,6 +3425,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
@@ -3269,7 +3443,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
                goto out;
 
        if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
-               ret = wl1271_scan_stop(wl);
+               ret = wl->ops->scan_stop(wl, wlvif);
                if (ret < 0)
                        goto out_sleep;
        }
@@ -3282,7 +3456,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
 
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
-       wl->scan_vif = NULL;
+       wl->scan_wlvif = NULL;
        wl->scan.req = NULL;
        ieee80211_scan_completed(wl->hw, true);
 
@@ -3316,15 +3490,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
-       if (ret < 0)
-               goto out_sleep;
-
-       ret = wl1271_scan_sched_scan_start(wl, wlvif);
+       ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
        if (ret < 0)
                goto out_sleep;
 
-       wl->sched_scanning = true;
+       wl->sched_vif = wlvif;
 
 out_sleep:
        wl1271_ps_elp_sleep(wl);
@@ -3351,7 +3521,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       wl1271_scan_sched_scan_stop(wl, wlvif);
+       wl->ops->sched_scan_stop(wl, wlvif);
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -3416,30 +3586,6 @@ out:
        return ret;
 }
 
-static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
-                           int offset)
-{
-       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-       u8 ssid_len;
-       const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
-                                        skb->len - offset);
-
-       if (!ptr) {
-               wl1271_error("No SSID in IEs!");
-               return -ENOENT;
-       }
-
-       ssid_len = ptr[1];
-       if (ssid_len > IEEE80211_MAX_SSID_LEN) {
-               wl1271_error("SSID is too long!");
-               return -EINVAL;
-       }
-
-       wlvif->ssid_len = ssid_len;
-       memcpy(wlvif->ssid, ptr+2, ssid_len);
-       return 0;
-}
-
 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
 {
        int len;
@@ -3620,7 +3766,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
 
        wl1271_debug(DEBUG_MASTER, "beacon updated");
 
-       ret = wl1271_ssid_set(vif, beacon, ieoffset);
+       ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
        if (ret < 0) {
                dev_kfree_skb(beacon);
                goto out;
@@ -3637,6 +3783,12 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
                goto out;
        }
 
+       wlvif->wmm_enabled =
+               cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+                                       WLAN_OUI_TYPE_MICROSOFT_WMM,
+                                       beacon->data + ieoffset,
+                                       beacon->len - ieoffset);
+
        /*
         * In case we already have a probe-resp beacon set explicitly
         * by usermode, don't use the beacon data.
@@ -3690,7 +3842,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
        bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret = 0;
 
-       if ((changed & BSS_CHANGED_BEACON_INT)) {
+       if (changed & BSS_CHANGED_BEACON_INT) {
                wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
                        bss_conf->beacon_int);
 
@@ -3703,7 +3855,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
        }
 
-       if ((changed & BSS_CHANGED_BEACON)) {
+       if (changed & BSS_CHANGED_BEACON) {
                ret = wlcore_set_beacon_template(wl, vif, is_ap);
                if (ret < 0)
                        goto out;
@@ -3724,7 +3876,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
 
-       if ((changed & BSS_CHANGED_BASIC_RATES)) {
+       if (changed & BSS_CHANGED_BASIC_RATES) {
                u32 rates = bss_conf->basic_rates;
 
                wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
@@ -3755,7 +3907,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
        if (ret < 0)
                goto out;
 
-       if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
                if (bss_conf->enable_beacon) {
                        if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
                                ret = wl12xx_cmd_role_start_ap(wl, wlvif);
@@ -3802,6 +3954,79 @@ out:
        return;
 }
 
+static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           struct ieee80211_bss_conf *bss_conf,
+                           u32 sta_rate_set)
+{
+       u32 rates;
+       int ret;
+
+       wl1271_debug(DEBUG_MAC80211,
+            "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
+            bss_conf->bssid, bss_conf->aid,
+            bss_conf->beacon_int,
+            bss_conf->basic_rates, sta_rate_set);
+
+       wlvif->beacon_int = bss_conf->beacon_int;
+       rates = bss_conf->basic_rates;
+       wlvif->basic_rate_set =
+               wl1271_tx_enabled_rates_get(wl, rates,
+                                           wlvif->band);
+       wlvif->basic_rate =
+               wl1271_tx_min_rate_get(wl,
+                                      wlvif->basic_rate_set);
+
+       if (sta_rate_set)
+               wlvif->rate_set =
+                       wl1271_tx_enabled_rates_get(wl,
+                                               sta_rate_set,
+                                               wlvif->band);
+
+       /* we only support sched_scan while not connected */
+       if (wl->sched_vif == wlvif)
+               wl->ops->sched_scan_stop(wl, wlvif);
+
+       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       ret = wl12xx_cmd_build_null_data(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
+       if (ret < 0)
+               return ret;
+
+       wlcore_set_ssid(wl, wlvif);
+
+       set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+       return 0;
+}
+
+static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
+
+       /* revert back to minimum rates for the current band */
+       wl1271_set_band_rate(wl, wlvif);
+       wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+
+       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+           test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
+               ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+       }
+
+       clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+       return 0;
+}
 /* STA/IBSS mode changes */
 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                                        struct ieee80211_vif *vif,
@@ -3809,7 +4034,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                                        u32 changed)
 {
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-       bool do_join = false, set_assoc = false;
+       bool do_join = false;
        bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
        bool ibss_joined = false;
        u32 sta_rate_set = 0;
@@ -3830,9 +4055,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                        set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
                        ibss_joined = true;
                } else {
-                       if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
-                                              &wlvif->flags))
-                               wl1271_unjoin(wl, wlvif);
+                       wlcore_unset_assoc(wl, wlvif);
+                       wl12xx_cmd_role_stop_sta(wl, wlvif);
                }
        }
 
@@ -3850,13 +4074,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                do_join = true;
        }
 
-       if (changed & BSS_CHANGED_IDLE && !is_ibss) {
-               ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
-               if (ret < 0)
-                       wl1271_warning("idle mode change failed %d", ret);
-       }
-
-       if ((changed & BSS_CHANGED_CQM)) {
+       if (changed & BSS_CHANGED_CQM) {
                bool enable = false;
                if (bss_conf->cqm_rssi_thold)
                        enable = true;
@@ -3868,150 +4086,39 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
        }
 
-       if (changed & BSS_CHANGED_BSSID)
-               if (!is_zero_ether_addr(bss_conf->bssid)) {
-                       ret = wl12xx_cmd_build_null_data(wl, wlvif);
-                       if (ret < 0)
-                               goto out;
-
-                       ret = wl1271_build_qos_null_data(wl, vif);
-                       if (ret < 0)
-                               goto out;
-               }
-
-       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
+       if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
+                      BSS_CHANGED_ASSOC)) {
                rcu_read_lock();
                sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (!sta)
-                       goto sta_not_found;
-
-               /* save the supp_rates of the ap */
-               sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
-               if (sta->ht_cap.ht_supported)
-                       sta_rate_set |=
-                         (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
-                         (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
-               sta_ht_cap = sta->ht_cap;
-               sta_exists = true;
-
-sta_not_found:
+               if (sta) {
+                       u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
+
+                       /* save the supp_rates of the ap */
+                       sta_rate_set = sta->supp_rates[wlvif->band];
+                       if (sta->ht_cap.ht_supported)
+                               sta_rate_set |=
+                                       (rx_mask[0] << HW_HT_RATES_OFFSET) |
+                                       (rx_mask[1] << HW_MIMO_RATES_OFFSET);
+                       sta_ht_cap = sta->ht_cap;
+                       sta_exists = true;
+               }
+
                rcu_read_unlock();
        }
 
-       if ((changed & BSS_CHANGED_ASSOC)) {
-               if (bss_conf->assoc) {
-                       u32 rates;
-                       int ieoffset;
-                       wlvif->aid = bss_conf->aid;
-                       wlvif->channel_type =
-                               cfg80211_get_chandef_type(&bss_conf->chandef);
-                       wlvif->beacon_int = bss_conf->beacon_int;
-                       do_join = true;
-                       set_assoc = true;
-
-                       /*
-                        * use basic rates from AP, and determine lowest rate
-                        * to use with control frames.
-                        */
-                       rates = bss_conf->basic_rates;
-                       wlvif->basic_rate_set =
-                               wl1271_tx_enabled_rates_get(wl, rates,
-                                                           wlvif->band);
-                       wlvif->basic_rate =
-                               wl1271_tx_min_rate_get(wl,
-                                                      wlvif->basic_rate_set);
-                       if (sta_rate_set)
-                               wlvif->rate_set =
-                                       wl1271_tx_enabled_rates_get(wl,
-                                                               sta_rate_set,
-                                                               wlvif->band);
-                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
-                       if (ret < 0)
-                               goto out;
-
-                       /*
-                        * with wl1271, we don't need to update the
-                        * beacon_int and dtim_period, because the firmware
-                        * updates it by itself when the first beacon is
-                        * received after a join.
-                        */
-                       ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
+       if (changed & BSS_CHANGED_BSSID) {
+               if (!is_zero_ether_addr(bss_conf->bssid)) {
+                       ret = wlcore_set_bssid(wl, wlvif, bss_conf,
+                                              sta_rate_set);
                        if (ret < 0)
                                goto out;
 
-                       /*
-                        * Get a template for hardware connection maintenance
-                        */
-                       dev_kfree_skb(wlvif->probereq);
-                       wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
-                                                                       wlvif,
-                                                                       NULL);
-                       ieoffset = offsetof(struct ieee80211_mgmt,
-                                           u.probe_req.variable);
-                       wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
-
-                       /* enable the connection monitoring feature */
-                       ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
-                       if (ret < 0)
-                               goto out;
+                       /* Need to update the BSSID (for filtering etc) */
+                       do_join = true;
                } else {
-                       /* use defaults when not associated */
-                       bool was_assoc =
-                           !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
-                                                &wlvif->flags);
-                       bool was_ifup =
-                           !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
-                                                &wlvif->flags);
-                       wlvif->aid = 0;
-
-                       /* free probe-request template */
-                       dev_kfree_skb(wlvif->probereq);
-                       wlvif->probereq = NULL;
-
-                       /* revert back to minimum rates for the current band */
-                       wl1271_set_band_rate(wl, wlvif);
-                       wlvif->basic_rate =
-                               wl1271_tx_min_rate_get(wl,
-                                                      wlvif->basic_rate_set);
-                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
-                       if (ret < 0)
-                               goto out;
-
-                       /* disable connection monitor features */
-                       ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
-
-                       /* Disable the keep-alive feature */
-                       ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
+                       ret = wlcore_clear_bssid(wl, wlvif);
                        if (ret < 0)
                                goto out;
-
-                       /* restore the bssid filter and go to dummy bssid */
-                       if (was_assoc) {
-                               /*
-                                * we might have to disable roc, if there was
-                                * no IF_OPER_UP notification.
-                                */
-                               if (!was_ifup) {
-                                       ret = wl12xx_croc(wl, wlvif->role_id);
-                                       if (ret < 0)
-                                               goto out;
-                               }
-                               /*
-                                * (we also need to disable roc in case of
-                                * roaming on the same channel. until we will
-                                * have a better flow...)
-                                */
-                               if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
-                                       ret = wl12xx_croc(wl,
-                                                         wlvif->dev_role_id);
-                                       if (ret < 0)
-                                               goto out;
-                               }
-
-                               wl1271_unjoin(wl, wlvif);
-                               if (!bss_conf->idle)
-                                       wl12xx_start_dev(wl, wlvif);
-                       }
                }
        }
 
@@ -4041,71 +4148,87 @@ sta_not_found:
                goto out;
 
        if (do_join) {
-               ret = wl1271_join(wl, wlvif, set_assoc);
+               ret = wlcore_join(wl, wlvif);
                if (ret < 0) {
                        wl1271_warning("cmd join failed %d", ret);
                        goto out;
                }
+       }
 
-               /* ROC until connected (after EAPOL exchange) */
-               if (!is_ibss) {
-                       ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
+       if (changed & BSS_CHANGED_ASSOC) {
+               if (bss_conf->assoc) {
+                       ret = wlcore_set_assoc(wl, wlvif, bss_conf,
+                                              sta_rate_set);
                        if (ret < 0)
                                goto out;
 
                        if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
                                wl12xx_set_authorized(wl, wlvif);
+               } else {
+                       wlcore_unset_assoc(wl, wlvif);
                }
-               /*
-                * stop device role if started (we might already be in
-                * STA/IBSS role).
-                */
-               if (wl12xx_dev_role_started(wlvif)) {
-                       ret = wl12xx_stop_dev(wl, wlvif);
+       }
+
+       if (changed & BSS_CHANGED_PS) {
+               if ((bss_conf->ps) &&
+                   test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+                   !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+                       int ps_mode;
+                       char *ps_mode_str;
+
+                       if (wl->conf.conn.forced_ps) {
+                               ps_mode = STATION_POWER_SAVE_MODE;
+                               ps_mode_str = "forced";
+                       } else {
+                               ps_mode = STATION_AUTO_PS_MODE;
+                               ps_mode_str = "auto";
+                       }
+
+                       wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
+
+                       ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
                        if (ret < 0)
-                               goto out;
+                               wl1271_warning("enter %s ps failed %d",
+                                              ps_mode_str, ret);
+               } else if (!bss_conf->ps &&
+                          test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+                       wl1271_debug(DEBUG_PSM, "auto ps disabled");
+
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_ACTIVE_MODE);
+                       if (ret < 0)
+                               wl1271_warning("exit auto ps failed %d", ret);
                }
        }
 
        /* Handle new association with HT. Do this after join. */
-       if (sta_exists) {
-               if ((changed & BSS_CHANGED_HT) &&
-                   (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
-                       ret = wl1271_acx_set_ht_capabilities(wl,
-                                                            &sta_ht_cap,
-                                                            true,
-                                                            wlvif->sta.hlid);
-                       if (ret < 0) {
-                               wl1271_warning("Set ht cap true failed %d",
-                                              ret);
-                               goto out;
-                       }
+       if (sta_exists &&
+           (changed & BSS_CHANGED_HT)) {
+               bool enabled =
+                       bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+
+               ret = wlcore_hw_set_peer_cap(wl,
+                                            &sta_ht_cap,
+                                            enabled,
+                                            wlvif->rate_set,
+                                            wlvif->sta.hlid);
+               if (ret < 0) {
+                       wl1271_warning("Set ht cap failed %d", ret);
+                       goto out;
+
                }
-               /* handle new association without HT and disassociation */
-               else if (changed & BSS_CHANGED_ASSOC) {
-                       ret = wl1271_acx_set_ht_capabilities(wl,
-                                                            &sta_ht_cap,
-                                                            false,
-                                                            wlvif->sta.hlid);
+
+               if (enabled) {
+                       ret = wl1271_acx_set_ht_information(wl, wlvif,
+                                               bss_conf->ht_operation_mode);
                        if (ret < 0) {
-                               wl1271_warning("Set ht cap false failed %d",
+                               wl1271_warning("Set ht information failed %d",
                                               ret);
                                goto out;
                        }
                }
        }
 
-       /* Handle HT information change. Done after join. */
-       if ((changed & BSS_CHANGED_HT) &&
-           (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
-               ret = wl1271_acx_set_ht_information(wl, wlvif,
-                                       bss_conf->ht_operation_mode);
-               if (ret < 0) {
-                       wl1271_warning("Set ht information failed %d", ret);
-                       goto out;
-               }
-       }
-
        /* Handle arp filtering. Done after join. */
        if ((changed & BSS_CHANGED_ARP_FILTER) ||
            (!is_ibss && (changed & BSS_CHANGED_QOS))) {
@@ -4113,8 +4236,7 @@ sta_not_found:
                wlvif->sta.qos = bss_conf->qos;
                WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
 
-               if (bss_conf->arp_addr_cnt == 1 &&
-                   bss_conf->arp_filter_enabled) {
+               if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
                        wlvif->ip_addr = addr;
                        /*
                         * The template should have been configured only upon
@@ -4155,15 +4277,15 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
        bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret;
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
-                    (int)changed);
+       wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
+                    wlvif->role_id, (int)changed);
 
        /*
         * make sure to cancel pending disconnections if our association
         * state changed
         */
        if (!is_ap && (changed & BSS_CHANGED_ASSOC))
-               cancel_delayed_work_sync(&wl->connection_loss_work);
+               cancel_delayed_work_sync(&wlvif->connection_loss_work);
 
        if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
            !bss_conf->enable_beacon)
@@ -4192,6 +4314,76 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
+static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
+                                struct ieee80211_chanctx_conf *ctx)
+{
+       wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
+                    ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+                    cfg80211_get_chandef_type(&ctx->def));
+       return 0;
+}
+
+static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
+                                    struct ieee80211_chanctx_conf *ctx)
+{
+       wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
+                    ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+                    cfg80211_get_chandef_type(&ctx->def));
+}
+
+static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
+                                    struct ieee80211_chanctx_conf *ctx,
+                                    u32 changed)
+{
+       wl1271_debug(DEBUG_MAC80211,
+                    "mac80211 change chanctx %d (type %d) changed 0x%x",
+                    ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+                    cfg80211_get_chandef_type(&ctx->def), changed);
+}
+
+static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_chanctx_conf *ctx)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int channel = ieee80211_frequency_to_channel(
+               ctx->def.chan->center_freq);
+
+       wl1271_debug(DEBUG_MAC80211,
+                    "mac80211 assign chanctx (role %d) %d (type %d)",
+                    wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
+
+       mutex_lock(&wl->mutex);
+
+       wlvif->band = ctx->def.chan->band;
+       wlvif->channel = channel;
+       wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
+
+       /* update default rates according to the band */
+       wl1271_set_band_rate(wl, wlvif);
+
+       mutex_unlock(&wl->mutex);
+
+       return 0;
+}
+
+static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+                                          struct ieee80211_vif *vif,
+                                          struct ieee80211_chanctx_conf *ctx)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
+       wl1271_debug(DEBUG_MAC80211,
+                    "mac80211 unassign chanctx (role %d) %d (type %d)",
+                    wlvif->role_id,
+                    ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+                    cfg80211_get_chandef_type(&ctx->def));
+
+       wl1271_tx_flush(wl);
+}
+
 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif, u16 queue,
                             const struct ieee80211_tx_queue_params *params)
@@ -4319,8 +4511,6 @@ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
                return;
 
        clear_bit(hlid, wlvif->ap.sta_hlid_map);
-       memset(wl->links[hlid].addr, 0, ETH_ALEN);
-       wl->links[hlid].ba_bitmap = 0;
        __clear_bit(hlid, &wl->ap_ps_map);
        __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        wl12xx_free_link(wl, wlvif, &hlid);
@@ -4380,6 +4570,45 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
        return ret;
 }
 
+static void wlcore_roc_if_possible(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif)
+{
+       if (find_first_bit(wl->roc_map,
+                          WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
+               return;
+
+       if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
+               return;
+
+       wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
+}
+
+static void wlcore_update_inconn_sta(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif,
+                                    struct wl1271_station *wl_sta,
+                                    bool in_connection)
+{
+       if (in_connection) {
+               if (WARN_ON(wl_sta->in_connection))
+                       return;
+               wl_sta->in_connection = true;
+               if (!wlvif->inconn_count++)
+                       wlcore_roc_if_possible(wl, wlvif);
+       } else {
+               if (!wl_sta->in_connection)
+                       return;
+
+               wl_sta->in_connection = false;
+               wlvif->inconn_count--;
+               if (WARN_ON(wlvif->inconn_count < 0))
+                       return;
+
+               if (!wlvif->inconn_count)
+                       if (test_bit(wlvif->role_id, wl->roc_map))
+                               wl12xx_croc(wl, wlvif->role_id);
+       }
+}
+
 static int wl12xx_update_sta_state(struct wl1271 *wl,
                                   struct wl12xx_vif *wlvif,
                                   struct ieee80211_sta *sta,
@@ -4398,8 +4627,13 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
        /* Add station (AP mode) */
        if (is_ap &&
            old_state == IEEE80211_STA_NOTEXIST &&
-           new_state == IEEE80211_STA_NONE)
-               return wl12xx_sta_add(wl, wlvif, sta);
+           new_state == IEEE80211_STA_NONE) {
+               ret = wl12xx_sta_add(wl, wlvif, sta);
+               if (ret)
+                       return ret;
+
+               wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
+       }
 
        /* Remove station (AP mode) */
        if (is_ap &&
@@ -4407,35 +4641,59 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
            new_state == IEEE80211_STA_NOTEXIST) {
                /* must not fail */
                wl12xx_sta_remove(wl, wlvif, sta);
-               return 0;
+
+               wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
        }
 
        /* Authorize station (AP mode) */
        if (is_ap &&
            new_state == IEEE80211_STA_AUTHORIZED) {
-               ret = wl12xx_cmd_set_peer_state(wl, hlid);
+               ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
                if (ret < 0)
                        return ret;
 
                ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
                                                     hlid);
-               return ret;
+               if (ret)
+                       return ret;
+
+               wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
        }
 
        /* Authorize station */
        if (is_sta &&
            new_state == IEEE80211_STA_AUTHORIZED) {
                set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
-               return wl12xx_set_authorized(wl, wlvif);
+               ret = wl12xx_set_authorized(wl, wlvif);
+               if (ret)
+                       return ret;
        }
 
        if (is_sta &&
            old_state == IEEE80211_STA_AUTHORIZED &&
            new_state == IEEE80211_STA_ASSOC) {
                clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
-               return 0;
+               clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
+       }
+
+       /* clear ROCs on failure or authorization */
+       if (is_sta &&
+           (new_state == IEEE80211_STA_AUTHORIZED ||
+            new_state == IEEE80211_STA_NOTEXIST)) {
+               if (test_bit(wlvif->role_id, wl->roc_map))
+                       wl12xx_croc(wl, wlvif->role_id);
        }
 
+       if (is_sta &&
+           old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
+               if (find_first_bit(wl->roc_map,
+                                  WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
+                       WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
+                       wl12xx_roc(wl, wlvif, wlvif->role_id,
+                                  wlvif->band, wlvif->channel);
+               }
+       }
        return 0;
 }
 
@@ -4500,18 +4758,18 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
 
        if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
                hlid = wlvif->sta.hlid;
-               ba_bitmap = &wlvif->sta.ba_rx_bitmap;
        } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
                struct wl1271_station *wl_sta;
 
                wl_sta = (struct wl1271_station *)sta->drv_priv;
                hlid = wl_sta->hlid;
-               ba_bitmap = &wl->links[hlid].ba_bitmap;
        } else {
                ret = -EINVAL;
                goto out;
        }
 
+       ba_bitmap = &wl->links[hlid].ba_bitmap;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
@@ -4665,12 +4923,23 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
 
        /* TODO: change mac80211 to pass vif as param */
        wl12xx_for_each_wlvif_sta(wl, wlvif) {
-               ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
+               unsigned long delay_usec;
 
-               if (!ret)
-                       set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+               ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
+               if (ret)
+                       goto out_sleep;
+
+               set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+
+               /* indicate failure 5 seconds after channel switch time */
+               delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
+                            ch_switch->count;
+               ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
+                               usecs_to_jiffies(delay_usec) +
+                               msecs_to_jiffies(5000));
        }
 
+out_sleep:
        wl1271_ps_elp_sleep(wl);
 
 out:
@@ -4684,6 +4953,144 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
        wl1271_tx_flush(wl);
 }
 
+static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_channel *chan,
+                                      int duration)
+{
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct wl1271 *wl = hw->priv;
+       int channel, ret = 0;
+
+       channel = ieee80211_frequency_to_channel(chan->center_freq);
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
+                    channel, wlvif->role_id);
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       /* return EBUSY if we can't ROC right now */
+       if (WARN_ON(wl->roc_vif ||
+                   find_first_bit(wl->roc_map,
+                                  WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
+       if (ret < 0)
+               goto out_sleep;
+
+       wl->roc_vif = vif;
+       ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
+                                    msecs_to_jiffies(duration));
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+       return ret;
+}
+
+static int __wlcore_roc_completed(struct wl1271 *wl)
+{
+       struct wl12xx_vif *wlvif;
+       int ret;
+
+       /* already completed */
+       if (unlikely(!wl->roc_vif))
+               return 0;
+
+       wlvif = wl12xx_vif_to_data(wl->roc_vif);
+
+       if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+               return -EBUSY;
+
+       ret = wl12xx_stop_dev(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       wl->roc_vif = NULL;
+
+       return 0;
+}
+
+static int wlcore_roc_completed(struct wl1271 *wl)
+{
+       int ret;
+
+       wl1271_debug(DEBUG_MAC80211, "roc complete");
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = __wlcore_roc_completed(wl);
+
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+
+       return ret;
+}
+
+static void wlcore_roc_complete_work(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct wl1271 *wl;
+       int ret;
+
+       dwork = container_of(work, struct delayed_work, work);
+       wl = container_of(dwork, struct wl1271, roc_complete_work);
+
+       ret = wlcore_roc_completed(wl);
+       if (!ret)
+               ieee80211_remain_on_channel_expired(wl->hw);
+}
+
+static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct wl1271 *wl = hw->priv;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
+
+       /* TODO: per-vif */
+       wl1271_tx_flush(wl);
+
+       /*
+        * we can't just flush_work here, because it might deadlock
+        * (as we might get called from the same workqueue)
+        */
+       cancel_delayed_work_sync(&wl->roc_complete_work);
+       wlcore_roc_completed(wl);
+
+       return 0;
+}
+
+static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta,
+                                   u32 changed)
+{
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct wl1271 *wl = hw->priv;
+
+       wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
+}
+
 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
 {
        struct wl1271 *wl = hw->priv;
@@ -4747,20 +5154,20 @@ static struct ieee80211_rate wl1271_rates[] = {
 
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_channel wl1271_channels[] = {
-       { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
-       { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
-       { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
-       { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
-       { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
-       { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
-       { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
-       { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
-       { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
-       { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
-       { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
-       { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
-       { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
-       { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
+       { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
 };
 
 /* can't be const, mac80211 writes to this */
@@ -4801,40 +5208,40 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
 
 /* 5 GHz band channels for WL1273 */
 static struct ieee80211_channel wl1271_channels_5ghz[] = {
-       { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
-       { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
-       { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
-       { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
-       { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
-       { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
-       { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
-       { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
-       { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
-       { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
-       { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
-       { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
-       { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
-       { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
-       { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
-       { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
-       { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
-       { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
-       { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
-       { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
-       { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
-       { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
-       { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
-       { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
-       { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
-       { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
-       { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
-       { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
-       { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
-       { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
-       { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
-       { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
-       { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
-       { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
+       { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
+       { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
 };
 
 static struct ieee80211_supported_band wl1271_band_5ghz = {
@@ -4875,6 +5282,14 @@ static const struct ieee80211_ops wl1271_ops = {
        .set_bitrate_mask = wl12xx_set_bitrate_mask,
        .channel_switch = wl12xx_op_channel_switch,
        .flush = wlcore_op_flush,
+       .remain_on_channel = wlcore_op_remain_on_channel,
+       .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
+       .add_chanctx = wlcore_op_add_chanctx,
+       .remove_chanctx = wlcore_op_remove_chanctx,
+       .change_chanctx = wlcore_op_change_chanctx,
+       .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
+       .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
+       .sta_rc_update = wlcore_op_sta_rc_update,
        CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
 };
 
@@ -5044,34 +5459,6 @@ static struct bin_attribute fwlog_attr = {
        .read = wl1271_sysfs_read_fwlog,
 };
 
-static void wl1271_connection_loss_work(struct work_struct *work)
-{
-       struct delayed_work *dwork;
-       struct wl1271 *wl;
-       struct ieee80211_vif *vif;
-       struct wl12xx_vif *wlvif;
-
-       dwork = container_of(work, struct delayed_work, work);
-       wl = container_of(dwork, struct wl1271, connection_loss_work);
-
-       wl1271_info("Connection loss work.");
-
-       mutex_lock(&wl->mutex);
-
-       if (unlikely(wl->state != WLCORE_STATE_ON))
-               goto out;
-
-       /* Call mac80211 connection loss */
-       wl12xx_for_each_wlvif_sta(wl, wlvif) {
-               if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
-                       goto out;
-               vif = wl12xx_wlvif_to_vif(wlvif);
-               ieee80211_connection_loss(vif);
-       }
-out:
-       mutex_unlock(&wl->mutex);
-}
-
 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
 {
        int i;
@@ -5117,7 +5504,7 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
 
        ret = wl12xx_set_power_on(wl);
        if (ret < 0)
-               goto out;
+               return ret;
 
        ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
        if (ret < 0)
@@ -5207,10 +5594,9 @@ static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
        },
 };
 
-static const struct ieee80211_iface_combination
+static struct ieee80211_iface_combination
 wlcore_iface_combinations[] = {
        {
-         .num_different_channels = 1,
          .max_interfaces = 3,
          .limits = wlcore_iface_limits,
          .n_limits = ARRAY_SIZE(wlcore_iface_limits),
@@ -5219,6 +5605,7 @@ wlcore_iface_combinations[] = {
 
 static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
+       int i;
        static const u32 cipher_suites[] = {
                WLAN_CIPHER_SUITE_WEP40,
                WLAN_CIPHER_SUITE_WEP104,
@@ -5249,7 +5636,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
                IEEE80211_HW_AP_LINK_PS |
                IEEE80211_HW_AMPDU_AGGREGATION |
                IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
-               IEEE80211_HW_SCAN_WHILE_IDLE;
+               IEEE80211_HW_QUEUE_CONTROL;
 
        wl->hw->wiphy->cipher_suites = cipher_suites;
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5271,6 +5658,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
                sizeof(struct ieee80211_header);
 
+       wl->hw->wiphy->max_remain_on_channel_duration = 5000;
+
        wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
                                WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
@@ -5279,6 +5668,22 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
                     ARRAY_SIZE(wl1271_channels_5ghz) >
                     WL1271_MAX_CHANNELS);
        /*
+       * clear channel flags from the previous usage
+       * and restore max_power & max_antenna_gain values.
+       */
+       for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
+               wl1271_band_2ghz.channels[i].flags = 0;
+               wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
+               wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
+               wl1271_band_5ghz.channels[i].flags = 0;
+               wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
+               wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
+       }
+
+       /*
         * We keep local copies of the band structs because we need to
         * modify them on a per-device basis.
         */
@@ -5298,7 +5703,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                &wl->bands[IEEE80211_BAND_5GHZ];
 
-       wl->hw->queues = 4;
+       /*
+        * allow 4 queues per mac address we support +
+        * 1 cab queue per mac + one global offchannel Tx queue
+        */
+       wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
+
+       /* the last queue is the offchannel queue */
+       wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
        wl->hw->max_rates = 1;
 
        wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
@@ -5311,6 +5723,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
        /* allowed interface combinations */
+       wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
        wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
        wl->hw->wiphy->n_iface_combinations =
                ARRAY_SIZE(wlcore_iface_combinations);
@@ -5327,7 +5740,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
+                                    u32 mbox_size)
 {
        struct ieee80211_hw *hw;
        struct wl1271 *wl;
@@ -5369,9 +5783,8 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
        INIT_WORK(&wl->tx_work, wl1271_tx_work);
        INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
        INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+       INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
        INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
-       INIT_DELAYED_WORK(&wl->connection_loss_work,
-                         wl1271_connection_loss_work);
 
        wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
        if (!wl->freezable_wq) {
@@ -5387,14 +5800,15 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
        wl->flags = 0;
        wl->sg_enabled = true;
        wl->sleep_auth = WL1271_PSM_ILLEGAL;
+       wl->recovery_count = 0;
        wl->hw_pg_ver = -1;
        wl->ap_ps_map = 0;
        wl->ap_fw_ps_map = 0;
        wl->quirks = 0;
        wl->platform_quirks = 0;
-       wl->sched_scanning = false;
        wl->system_hlid = WL12XX_SYSTEM_HLID;
        wl->active_sta_count = 0;
+       wl->active_link_count = 0;
        wl->fwlog_size = 0;
        init_waitqueue_head(&wl->fwlog_waitq);
 
@@ -5434,14 +5848,24 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
                goto err_dummy_packet;
        }
 
-       wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
+       wl->mbox_size = mbox_size;
+       wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
        if (!wl->mbox) {
                ret = -ENOMEM;
                goto err_fwlog;
        }
 
+       wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
+       if (!wl->buffer_32) {
+               ret = -ENOMEM;
+               goto err_mbox;
+       }
+
        return hw;
 
+err_mbox:
+       kfree(wl->mbox);
+
 err_fwlog:
        free_page((unsigned long)wl->fwlog);
 
@@ -5480,6 +5904,8 @@ int wlcore_free_hw(struct wl1271 *wl)
        device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
 
        device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+       kfree(wl->buffer_32);
+       kfree(wl->mbox);
        free_page((unsigned long)wl->fwlog);
        dev_kfree_skb(wl->dummy_packet);
        free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
@@ -5536,7 +5962,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
        struct wl1271 *wl = context;
        struct platform_device *pdev = wl->pdev;
-       struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+       struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+       struct wl12xx_platform_data *pdata = pdev_data->pdata;
        unsigned long irqflags;
        int ret;
 
@@ -5565,8 +5992,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 
        wl->irq = platform_get_irq(pdev, 0);
        wl->platform_quirks = pdata->platform_quirks;
-       wl->set_power = pdata->set_power;
-       wl->if_ops = pdata->ops;
+       wl->if_ops = pdev_data->if_ops;
 
        if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
                irqflags = IRQF_TRIGGER_RISING;
@@ -5712,10 +6138,10 @@ module_param_named(fwlog, fwlog_param, charp, 0);
 MODULE_PARM_DESC(fwlog,
                 "FW logger options: continuous, ondemand, dbgpins or disable");
 
-module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
+module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
 
-module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
+module_param(no_recovery, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
 
 MODULE_LICENSE("GPL");
index 4d1414a..9b7b6e2 100644 (file)
@@ -151,9 +151,6 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
                        wl12xx_queue_recovery_work(wl);
                        ret = -ETIMEDOUT;
                        goto err;
-               } else if (ret < 0) {
-                       wl1271_error("ELP wakeup completion error.");
-                       goto err;
                }
        }
 
@@ -242,11 +239,12 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
        struct ieee80211_tx_info *info;
        unsigned long flags;
        int filtered[NUM_TX_QUEUES];
+       struct wl1271_link *lnk = &wl->links[hlid];
 
        /* filter all frames currently in the low level queues for this hlid */
        for (i = 0; i < NUM_TX_QUEUES; i++) {
                filtered[i] = 0;
-               while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+               while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
                        filtered[i]++;
 
                        if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
@@ -260,8 +258,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
        }
 
        spin_lock_irqsave(&wl->wl_lock, flags);
-       for (i = 0; i < NUM_TX_QUEUES; i++)
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
                wl->tx_queue_count[i] -= filtered[i];
+               if (lnk->wlvif)
+                       lnk->wlvif->tx_queue_count[i] -= filtered[i];
+       }
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        wl1271_handle_tx_low_watermark(wl);
index 9ee0ec6..6791a1a 100644 (file)
@@ -92,11 +92,16 @@ static void wl1271_rx_status(struct wl1271 *wl,
                status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
                                RX_FLAG_DECRYPTED;
 
-               if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) {
+               if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) {
                        status->flag |= RX_FLAG_MMIC_ERROR;
-                       wl1271_warning("Michael MIC error");
+                       wl1271_warning("Michael MIC error. Desc: 0x%x",
+                                      desc_err_code);
                }
        }
+
+       if (beacon)
+               wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
+                                               status->band);
 }
 
 static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
@@ -108,7 +113,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
        u8 *buf;
        u8 beacon = 0;
        u8 is_data = 0;
-       u8 reserved = 0;
+       u8 reserved = 0, offset_to_data = 0;
        u16 seq_num;
        u32 pkt_data_len;
 
@@ -128,6 +133,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
 
        if (rx_align == WLCORE_RX_BUF_UNALIGNED)
                reserved = RX_BUF_ALIGN;
+       else if (rx_align == WLCORE_RX_BUF_PADDED)
+               offset_to_data = RX_BUF_ALIGN;
 
        /* the data read starts with the descriptor */
        desc = (struct wl1271_rx_descriptor *) data;
@@ -139,19 +146,15 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
                return 0;
        }
 
-       switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
        /* discard corrupted packets */
-       case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
-       case WL1271_RX_DESC_DECRYPT_FAIL:
-               wl1271_warning("corrupted packet in RX with status: 0x%x",
-                              desc->status & WL1271_RX_DESC_STATUS_MASK);
-               return -EINVAL;
-       case WL1271_RX_DESC_SUCCESS:
-       case WL1271_RX_DESC_MIC_FAIL:
-               break;
-       default:
-               wl1271_error("invalid RX descriptor status: 0x%x",
-                            desc->status & WL1271_RX_DESC_STATUS_MASK);
+       if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) {
+               hdr = (void *)(data + sizeof(*desc) + offset_to_data);
+               wl1271_warning("corrupted packet in RX: status: 0x%x len: %d",
+                              desc->status & WL1271_RX_DESC_STATUS_MASK,
+                              pkt_data_len);
+               wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc),
+                           min(pkt_data_len,
+                               ieee80211_hdrlen(hdr->frame_control)));
                return -EINVAL;
        }
 
index 71eba18..3363f60 100644 (file)
  * Bits 3-5 - process_id tag (AP mode FW)
  * Bits 6-7 - reserved
  */
-#define WL1271_RX_DESC_STATUS_MASK      0x03
+#define WL1271_RX_DESC_STATUS_MASK      0x07
 
 #define WL1271_RX_DESC_SUCCESS          0x00
 #define WL1271_RX_DESC_DECRYPT_FAIL     0x01
 #define WL1271_RX_DESC_MIC_FAIL         0x02
-#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03
 
 #define RX_MEM_BLOCK_MASK            0xFF
 #define RX_BUF_SIZE_MASK             0xFFF00
index d005014..f407101 100644 (file)
@@ -35,7 +35,6 @@ void wl1271_scan_complete_work(struct work_struct *work)
 {
        struct delayed_work *dwork;
        struct wl1271 *wl;
-       struct ieee80211_vif *vif;
        struct wl12xx_vif *wlvif;
        int ret;
 
@@ -52,8 +51,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
                goto out;
 
-       vif = wl->scan_vif;
-       wlvif = wl12xx_vif_to_data(vif);
+       wlvif = wl->scan_wlvif;
 
        /*
         * Rearm the tx watchdog just before idling scan. This
@@ -64,7 +62,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
        wl->scan.req = NULL;
-       wl->scan_vif = NULL;
+       wl->scan_wlvif = NULL;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
@@ -82,6 +80,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
                wl12xx_queue_recovery_work(wl);
        }
 
+       wlcore_cmd_regdomain_config_locked(wl);
+
        ieee80211_scan_completed(wl->hw, false);
 
 out:
@@ -89,371 +89,99 @@ out:
 
 }
 
-
-static int wl1271_get_scan_channels(struct wl1271 *wl,
-                                   struct cfg80211_scan_request *req,
-                                   struct basic_scan_channel_params *channels,
-                                   enum ieee80211_band band, bool passive)
-{
-       struct conf_scan_settings *c = &wl->conf.scan;
-       int i, j;
-       u32 flags;
-
-       for (i = 0, j = 0;
-            i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
-            i++) {
-               flags = req->channels[i]->flags;
-
-               if (!test_bit(i, wl->scan.scanned_ch) &&
-                   !(flags & IEEE80211_CHAN_DISABLED) &&
-                   (req->channels[i]->band == band) &&
-                   /*
-                    * In passive scans, we scan all remaining
-                    * channels, even if not marked as such.
-                    * In active scans, we only scan channels not
-                    * marked as passive.
-                    */
-                   (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
-                       wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
-                                    req->channels[i]->band,
-                                    req->channels[i]->center_freq);
-                       wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
-                                    req->channels[i]->hw_value,
-                                    req->channels[i]->flags);
-                       wl1271_debug(DEBUG_SCAN,
-                                    "max_antenna_gain %d, max_power %d",
-                                    req->channels[i]->max_antenna_gain,
-                                    req->channels[i]->max_power);
-                       wl1271_debug(DEBUG_SCAN, "beacon_found %d",
-                                    req->channels[i]->beacon_found);
-
-                       if (!passive) {
-                               channels[j].min_duration =
-                                       cpu_to_le32(c->min_dwell_time_active);
-                               channels[j].max_duration =
-                                       cpu_to_le32(c->max_dwell_time_active);
-                       } else {
-                               channels[j].min_duration =
-                                       cpu_to_le32(c->min_dwell_time_passive);
-                               channels[j].max_duration =
-                                       cpu_to_le32(c->max_dwell_time_passive);
-                       }
-                       channels[j].early_termination = 0;
-                       channels[j].tx_power_att = req->channels[i]->max_power;
-                       channels[j].channel = req->channels[i]->hw_value;
-
-                       memset(&channels[j].bssid_lsb, 0xff, 4);
-                       memset(&channels[j].bssid_msb, 0xff, 2);
-
-                       /* Mark the channels we already used */
-                       set_bit(i, wl->scan.scanned_ch);
-
-                       j++;
-               }
-       }
-
-       return j;
-}
-
-#define WL1271_NOTHING_TO_SCAN 1
-
-static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
-                           enum ieee80211_band band,
-                           bool passive, u32 basic_rate)
+static void wlcore_started_vifs_iter(void *data, u8 *mac,
+                                    struct ieee80211_vif *vif)
 {
-       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-       struct wl1271_cmd_scan *cmd;
-       struct wl1271_cmd_trigger_scan_to *trigger;
-       int ret;
-       u16 scan_options = 0;
-
-       /* skip active scans if we don't have SSIDs */
-       if (!passive && wl->scan.req->n_ssids == 0)
-               return WL1271_NOTHING_TO_SCAN;
-
-       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-       trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
-       if (!cmd || !trigger) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       if (wl->conf.scan.split_scan_timeout)
-               scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
-
-       if (passive)
-               scan_options |= WL1271_SCAN_OPT_PASSIVE;
-
-       cmd->params.role_id = wlvif->role_id;
-
-       if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       cmd->params.scan_options = cpu_to_le16(scan_options);
-
-       cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
-                                                   cmd->channels,
-                                                   band, passive);
-       if (cmd->params.n_ch == 0) {
-               ret = WL1271_NOTHING_TO_SCAN;
-               goto out;
-       }
-
-       cmd->params.tx_rate = cpu_to_le32(basic_rate);
-       cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
-       cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
-       cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
-
-       if (band == IEEE80211_BAND_2GHZ)
-               cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
-       else
-               cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
-
-       if (wl->scan.ssid_len && wl->scan.ssid) {
-               cmd->params.ssid_len = wl->scan.ssid_len;
-               memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
-       }
-
-       memcpy(cmd->addr, vif->addr, ETH_ALEN);
-
-       ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                        cmd->params.role_id, band,
-                                        wl->scan.ssid, wl->scan.ssid_len,
-                                        wl->scan.req->ie,
-                                        wl->scan.req->ie_len, false);
-       if (ret < 0) {
-               wl1271_error("PROBE request template failed");
-               goto out;
-       }
-
-       trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
-       ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
-                             sizeof(*trigger), 0);
-       if (ret < 0) {
-               wl1271_error("trigger scan to failed for hw scan");
-               goto out;
-       }
-
-       wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+       int *count = (int *)data;
 
-       ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
-       if (ret < 0) {
-               wl1271_error("SCAN failed");
-               goto out;
-       }
-
-out:
-       kfree(cmd);
-       kfree(trigger);
-       return ret;
+       if (!vif->bss_conf.idle)
+               (*count)++;
 }
 
-void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
+static int wlcore_count_started_vifs(struct wl1271 *wl)
 {
-       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-       int ret = 0;
-       enum ieee80211_band band;
-       u32 rate, mask;
-
-       switch (wl->scan.state) {
-       case WL1271_SCAN_STATE_IDLE:
-               break;
-
-       case WL1271_SCAN_STATE_2GHZ_ACTIVE:
-               band = IEEE80211_BAND_2GHZ;
-               mask = wlvif->bitrate_masks[band];
-               if (wl->scan.req->no_cck) {
-                       mask &= ~CONF_TX_CCK_RATES;
-                       if (!mask)
-                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
-               }
-               rate = wl1271_tx_min_rate_get(wl, mask);
-               ret = wl1271_scan_send(wl, vif, band, false, rate);
-               if (ret == WL1271_NOTHING_TO_SCAN) {
-                       wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
-                       wl1271_scan_stm(wl, vif);
-               }
-
-               break;
-
-       case WL1271_SCAN_STATE_2GHZ_PASSIVE:
-               band = IEEE80211_BAND_2GHZ;
-               mask = wlvif->bitrate_masks[band];
-               if (wl->scan.req->no_cck) {
-                       mask &= ~CONF_TX_CCK_RATES;
-                       if (!mask)
-                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
-               }
-               rate = wl1271_tx_min_rate_get(wl, mask);
-               ret = wl1271_scan_send(wl, vif, band, true, rate);
-               if (ret == WL1271_NOTHING_TO_SCAN) {
-                       if (wl->enable_11a)
-                               wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
-                       else
-                               wl->scan.state = WL1271_SCAN_STATE_DONE;
-                       wl1271_scan_stm(wl, vif);
-               }
-
-               break;
+       int count = 0;
 
-       case WL1271_SCAN_STATE_5GHZ_ACTIVE:
-               band = IEEE80211_BAND_5GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, vif, band, false, rate);
-               if (ret == WL1271_NOTHING_TO_SCAN) {
-                       wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
-                       wl1271_scan_stm(wl, vif);
-               }
-
-               break;
-
-       case WL1271_SCAN_STATE_5GHZ_PASSIVE:
-               band = IEEE80211_BAND_5GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, vif, band, true, rate);
-               if (ret == WL1271_NOTHING_TO_SCAN) {
-                       wl->scan.state = WL1271_SCAN_STATE_DONE;
-                       wl1271_scan_stm(wl, vif);
-               }
-
-               break;
-
-       case WL1271_SCAN_STATE_DONE:
-               wl->scan.failed = false;
-               cancel_delayed_work(&wl->scan_complete_work);
-               ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
-                                            msecs_to_jiffies(0));
-               break;
-
-       default:
-               wl1271_error("invalid scan state");
-               break;
-       }
-
-       if (ret < 0) {
-               cancel_delayed_work(&wl->scan_complete_work);
-               ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
-                                            msecs_to_jiffies(0));
-       }
-}
-
-int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
-               const u8 *ssid, size_t ssid_len,
-               struct cfg80211_scan_request *req)
-{
-       /*
-        * cfg80211 should guarantee that we don't get more channels
-        * than what we have registered.
-        */
-       BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
-
-       if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
-               return -EBUSY;
-
-       wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
-
-       if (ssid_len && ssid) {
-               wl->scan.ssid_len = ssid_len;
-               memcpy(wl->scan.ssid, ssid, ssid_len);
-       } else {
-               wl->scan.ssid_len = 0;
-       }
-
-       wl->scan_vif = vif;
-       wl->scan.req = req;
-       memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
-
-       /* we assume failure so that timeout scenarios are handled correctly */
-       wl->scan.failed = true;
-       ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
-                                    msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
-
-       wl1271_scan_stm(wl, vif);
-
-       return 0;
-}
-
-int wl1271_scan_stop(struct wl1271 *wl)
-{
-       struct wl1271_cmd_header *cmd = NULL;
-       int ret = 0;
-
-       if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
-               return -EINVAL;
-
-       wl1271_debug(DEBUG_CMD, "cmd scan stop");
-
-       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-       if (!cmd) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
-                             sizeof(*cmd), 0);
-       if (ret < 0) {
-               wl1271_error("cmd stop_scan failed");
-               goto out;
-       }
-out:
-       kfree(cmd);
-       return ret;
+       ieee80211_iterate_active_interfaces_atomic(wl->hw,
+                                       IEEE80211_IFACE_ITER_RESUME_ALL,
+                                       wlcore_started_vifs_iter, &count);
+       return count;
 }
 
 static int
-wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
-                                   struct cfg80211_sched_scan_request *req,
-                                   struct conn_scan_ch_params *channels,
-                                   u32 band, bool radar, bool passive,
-                                   int start, int max_channels,
-                                   u8 *n_pactive_ch)
+wlcore_scan_get_channels(struct wl1271 *wl,
+                        struct ieee80211_channel *req_channels[],
+                        u32 n_channels,
+                        u32 n_ssids,
+                        struct conn_scan_ch_params *channels,
+                        u32 band, bool radar, bool passive,
+                        int start, int max_channels,
+                        u8 *n_pactive_ch,
+                        int scan_type)
 {
-       struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
        int i, j;
        u32 flags;
-       bool force_passive = !req->n_ssids;
-       u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe;
+       bool force_passive = !n_ssids;
+       u32 min_dwell_time_active, max_dwell_time_active;
        u32 dwell_time_passive, dwell_time_dfs;
 
-       if (band == IEEE80211_BAND_5GHZ)
-               delta_per_probe = c->dwell_time_delta_per_probe_5;
-       else
-               delta_per_probe = c->dwell_time_delta_per_probe;
+       /* configure dwell times according to scan type */
+       if (scan_type == SCAN_TYPE_SEARCH) {
+               struct conf_scan_settings *c = &wl->conf.scan;
+               bool active_vif_exists = !!wlcore_count_started_vifs(wl);
+
+               min_dwell_time_active = active_vif_exists ?
+                       c->min_dwell_time_active :
+                       c->min_dwell_time_active_long;
+               max_dwell_time_active = active_vif_exists ?
+                       c->max_dwell_time_active :
+                       c->max_dwell_time_active_long;
+               dwell_time_passive = c->dwell_time_passive;
+               dwell_time_dfs = c->dwell_time_dfs;
+       } else {
+               struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+               u32 delta_per_probe;
 
-       min_dwell_time_active = c->base_dwell_time +
-                req->n_ssids * c->num_probe_reqs * delta_per_probe;
+               if (band == IEEE80211_BAND_5GHZ)
+                       delta_per_probe = c->dwell_time_delta_per_probe_5;
+               else
+                       delta_per_probe = c->dwell_time_delta_per_probe;
 
-       max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta;
+               min_dwell_time_active = c->base_dwell_time +
+                        n_ssids * c->num_probe_reqs * delta_per_probe;
 
+               max_dwell_time_active = min_dwell_time_active +
+                                       c->max_dwell_time_delta;
+               dwell_time_passive = c->dwell_time_passive;
+               dwell_time_dfs = c->dwell_time_dfs;
+       }
        min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
        max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
-       dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000);
-       dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000);
+       dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000);
+       dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000);
 
        for (i = 0, j = start;
-            i < req->n_channels && j < max_channels;
+            i < n_channels && j < max_channels;
             i++) {
-               flags = req->channels[i]->flags;
+               flags = req_channels[i]->flags;
 
                if (force_passive)
                        flags |= IEEE80211_CHAN_PASSIVE_SCAN;
 
-               if ((req->channels[i]->band == band) &&
+               if ((req_channels[i]->band == band) &&
                    !(flags & IEEE80211_CHAN_DISABLED) &&
                    (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
                    /* if radar is set, we ignore the passive flag */
                    (radar ||
                     !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
                        wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
-                                    req->channels[i]->band,
-                                    req->channels[i]->center_freq);
+                                    req_channels[i]->band,
+                                    req_channels[i]->center_freq);
                        wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
-                                    req->channels[i]->hw_value,
-                                    req->channels[i]->flags);
+                                    req_channels[i]->hw_value,
+                                    req_channels[i]->flags);
                        wl1271_debug(DEBUG_SCAN, "max_power %d",
-                                    req->channels[i]->max_power);
+                                    req_channels[i]->max_power);
                        wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
                                     min_dwell_time_active,
                                     max_dwell_time_active);
@@ -473,10 +201,11 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
                        channels[j].max_duration =
                                cpu_to_le16(max_dwell_time_active);
 
-                       channels[j].tx_power_att = req->channels[i]->max_power;
-                       channels[j].channel = req->channels[i]->hw_value;
+                       channels[j].tx_power_att = req_channels[i]->max_power;
+                       channels[j].channel = req_channels[i]->hw_value;
 
-                       if ((band == IEEE80211_BAND_2GHZ) &&
+                       if (n_pactive_ch &&
+                           (band == IEEE80211_BAND_2GHZ) &&
                            (channels[j].channel >= 12) &&
                            (channels[j].channel <= 14) &&
                            (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
@@ -500,51 +229,80 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
        return j - start;
 }
 
-static bool
-wl1271_scan_sched_scan_channels(struct wl1271 *wl,
-                               struct cfg80211_sched_scan_request *req,
-                               struct wl1271_cmd_sched_scan_config *cfg)
+bool
+wlcore_set_scan_chan_params(struct wl1271 *wl,
+                           struct wlcore_scan_channels *cfg,
+                           struct ieee80211_channel *channels[],
+                           u32 n_channels,
+                           u32 n_ssids,
+                           int scan_type)
 {
        u8 n_pactive_ch = 0;
 
        cfg->passive[0] =
-               wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
-                                                   IEEE80211_BAND_2GHZ,
-                                                   false, true, 0,
-                                                   MAX_CHANNELS_2GHZ,
-                                                   &n_pactive_ch);
+               wlcore_scan_get_channels(wl,
+                                        channels,
+                                        n_channels,
+                                        n_ssids,
+                                        cfg->channels_2,
+                                        IEEE80211_BAND_2GHZ,
+                                        false, true, 0,
+                                        MAX_CHANNELS_2GHZ,
+                                        &n_pactive_ch,
+                                        scan_type);
        cfg->active[0] =
-               wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
-                                                   IEEE80211_BAND_2GHZ,
-                                                   false, false,
-                                                   cfg->passive[0],
-                                                   MAX_CHANNELS_2GHZ,
-                                                   &n_pactive_ch);
+               wlcore_scan_get_channels(wl,
+                                        channels,
+                                        n_channels,
+                                        n_ssids,
+                                        cfg->channels_2,
+                                        IEEE80211_BAND_2GHZ,
+                                        false, false,
+                                        cfg->passive[0],
+                                        MAX_CHANNELS_2GHZ,
+                                        &n_pactive_ch,
+                                        scan_type);
        cfg->passive[1] =
-               wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
-                                                   IEEE80211_BAND_5GHZ,
-                                                   false, true, 0,
-                                                   MAX_CHANNELS_5GHZ,
-                                                   &n_pactive_ch);
+               wlcore_scan_get_channels(wl,
+                                        channels,
+                                        n_channels,
+                                        n_ssids,
+                                        cfg->channels_5,
+                                        IEEE80211_BAND_5GHZ,
+                                        false, true, 0,
+                                        wl->max_channels_5,
+                                        &n_pactive_ch,
+                                        scan_type);
        cfg->dfs =
-               wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
-                                                   IEEE80211_BAND_5GHZ,
-                                                   true, true,
-                                                   cfg->passive[1],
-                                                   MAX_CHANNELS_5GHZ,
-                                                   &n_pactive_ch);
+               wlcore_scan_get_channels(wl,
+                                        channels,
+                                        n_channels,
+                                        n_ssids,
+                                        cfg->channels_5,
+                                        IEEE80211_BAND_5GHZ,
+                                        true, true,
+                                        cfg->passive[1],
+                                        wl->max_channels_5,
+                                        &n_pactive_ch,
+                                        scan_type);
        cfg->active[1] =
-               wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
-                                                   IEEE80211_BAND_5GHZ,
-                                                   false, false,
-                                                   cfg->passive[1] + cfg->dfs,
-                                                   MAX_CHANNELS_5GHZ,
-                                                   &n_pactive_ch);
+               wlcore_scan_get_channels(wl,
+                                        channels,
+                                        n_channels,
+                                        n_ssids,
+                                        cfg->channels_5,
+                                        IEEE80211_BAND_5GHZ,
+                                        false, false,
+                                        cfg->passive[1] + cfg->dfs,
+                                        wl->max_channels_5,
+                                        &n_pactive_ch,
+                                        scan_type);
+
        /* 802.11j channels are not supported yet */
        cfg->passive[2] = 0;
        cfg->active[2] = 0;
 
-       cfg->n_pactive_ch = n_pactive_ch;
+       cfg->passive_active = n_pactive_ch;
 
        wl1271_debug(DEBUG_SCAN, "    2.4GHz: active %d passive %d",
                     cfg->active[0], cfg->passive[0]);
@@ -556,10 +314,48 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
                cfg->passive[1] || cfg->active[1] || cfg->dfs ||
                cfg->passive[2] || cfg->active[2];
 }
+EXPORT_SYMBOL_GPL(wlcore_set_scan_chan_params);
+
+int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+               const u8 *ssid, size_t ssid_len,
+               struct cfg80211_scan_request *req)
+{
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
+       /*
+        * cfg80211 should guarantee that we don't get more channels
+        * than what we have registered.
+        */
+       BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
+
+       if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
+               return -EBUSY;
+
+       wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
+
+       if (ssid_len && ssid) {
+               wl->scan.ssid_len = ssid_len;
+               memcpy(wl->scan.ssid, ssid, ssid_len);
+       } else {
+               wl->scan.ssid_len = 0;
+       }
+
+       wl->scan_wlvif = wlvif;
+       wl->scan.req = req;
+       memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+
+       /* we assume failure so that timeout scenarios are handled correctly */
+       wl->scan.failed = true;
+       ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+                                    msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
 
+       wl->ops->scan_start(wl, wlvif, req);
+
+       return 0;
+}
 /* Returns the scan type to be used or a negative value on error */
-static int
-wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
+int
+wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
                                 struct wl12xx_vif *wlvif,
                                 struct cfg80211_sched_scan_request *req)
 {
@@ -662,160 +458,12 @@ out:
                return ret;
        return type;
 }
+EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_ssid_list);
 
-int wl1271_scan_sched_scan_config(struct wl1271 *wl,
-                                 struct wl12xx_vif *wlvif,
-                                 struct cfg80211_sched_scan_request *req,
-                                 struct ieee80211_sched_scan_ies *ies)
-{
-       struct wl1271_cmd_sched_scan_config *cfg = NULL;
-       struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
-       int i, ret;
-       bool force_passive = !req->n_ssids;
-
-       wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
-
-       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-       if (!cfg)
-               return -ENOMEM;
-
-       cfg->role_id = wlvif->role_id;
-       cfg->rssi_threshold = c->rssi_threshold;
-       cfg->snr_threshold  = c->snr_threshold;
-       cfg->n_probe_reqs = c->num_probe_reqs;
-       /* cycles set to 0 it means infinite (until manually stopped) */
-       cfg->cycles = 0;
-       /* report APs when at least 1 is found */
-       cfg->report_after = 1;
-       /* don't stop scanning automatically when something is found */
-       cfg->terminate = 0;
-       cfg->tag = WL1271_SCAN_DEFAULT_TAG;
-       /* don't filter on BSS type */
-       cfg->bss_type = SCAN_BSS_TYPE_ANY;
-       /* currently NL80211 supports only a single interval */
-       for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
-               cfg->intervals[i] = cpu_to_le32(req->interval);
-
-       cfg->ssid_len = 0;
-       ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
-       if (ret < 0)
-               goto out;
-
-       cfg->filter_type = ret;
-
-       wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
-
-       if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) {
-               wl1271_error("scan channel list is empty");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (!force_passive && cfg->active[0]) {
-               u8 band = IEEE80211_BAND_2GHZ;
-               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                                wlvif->role_id, band,
-                                                req->ssids[0].ssid,
-                                                req->ssids[0].ssid_len,
-                                                ies->ie[band],
-                                                ies->len[band], true);
-               if (ret < 0) {
-                       wl1271_error("2.4GHz PROBE request template failed");
-                       goto out;
-               }
-       }
-
-       if (!force_passive && cfg->active[1]) {
-               u8 band = IEEE80211_BAND_5GHZ;
-               ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                                wlvif->role_id, band,
-                                                req->ssids[0].ssid,
-                                                req->ssids[0].ssid_len,
-                                                ies->ie[band],
-                                                ies->len[band], true);
-               if (ret < 0) {
-                       wl1271_error("5GHz PROBE request template failed");
-                       goto out;
-               }
-       }
-
-       wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
-
-       ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
-                             sizeof(*cfg), 0);
-       if (ret < 0) {
-               wl1271_error("SCAN configuration failed");
-               goto out;
-       }
-out:
-       kfree(cfg);
-       return ret;
-}
-
-int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
-{
-       struct wl1271_cmd_sched_scan_start *start;
-       int ret = 0;
-
-       wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
-
-       if (wlvif->bss_type != BSS_TYPE_STA_BSS)
-               return -EOPNOTSUPP;
-
-       if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
-           test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
-               return -EBUSY;
-
-       start = kzalloc(sizeof(*start), GFP_KERNEL);
-       if (!start)
-               return -ENOMEM;
-
-       start->role_id = wlvif->role_id;
-       start->tag = WL1271_SCAN_DEFAULT_TAG;
-
-       ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
-                             sizeof(*start), 0);
-       if (ret < 0) {
-               wl1271_error("failed to send scan start command");
-               goto out_free;
-       }
-
-out_free:
-       kfree(start);
-       return ret;
-}
-
-void wl1271_scan_sched_scan_results(struct wl1271 *wl)
+void wlcore_scan_sched_scan_results(struct wl1271 *wl)
 {
        wl1271_debug(DEBUG_SCAN, "got periodic scan results");
 
        ieee80211_sched_scan_results(wl->hw);
 }
-
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif)
-{
-       struct wl1271_cmd_sched_scan_stop *stop;
-       int ret = 0;
-
-       wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
-
-       /* FIXME: what to do if alloc'ing to stop fails? */
-       stop = kzalloc(sizeof(*stop), GFP_KERNEL);
-       if (!stop) {
-               wl1271_error("failed to alloc memory to send sched scan stop");
-               return;
-       }
-
-       stop->role_id = wlvif->role_id;
-       stop->tag = WL1271_SCAN_DEFAULT_TAG;
-
-       ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
-                             sizeof(*stop), 0);
-       if (ret < 0) {
-               wl1271_error("failed to send sched scan stop command");
-               goto out_free;
-       }
-
-out_free:
-       kfree(stop);
-}
+EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_results);
index 29f3c8d..a6ab24b 100644 (file)
 
 #include "wlcore.h"
 
-int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
                const u8 *ssid, size_t ssid_len,
                struct cfg80211_scan_request *req);
-int wl1271_scan_stop(struct wl1271 *wl);
 int wl1271_scan_build_probe_req(struct wl1271 *wl,
                                const u8 *ssid, size_t ssid_len,
                                const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
+void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl1271_scan_complete_work(struct work_struct *work);
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                                     struct wl12xx_vif *wlvif,
                                     struct cfg80211_sched_scan_request *req,
                                     struct ieee80211_sched_scan_ies *ies);
 int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_results(struct wl1271 *wl);
+void wlcore_scan_sched_scan_results(struct wl1271 *wl);
 
 #define WL1271_SCAN_MAX_CHANNELS       24
 #define WL1271_SCAN_DEFAULT_TAG        1
@@ -66,56 +64,6 @@ enum {
        WL1271_SCAN_STATE_DONE
 };
 
-struct basic_scan_params {
-       /* Scan option flags (WL1271_SCAN_OPT_*) */
-       __le16 scan_options;
-       u8 role_id;
-       /* Number of scan channels in the list (maximum 30) */
-       u8 n_ch;
-       /* This field indicates the number of probe requests to send
-          per channel for an active scan */
-       u8 n_probe_reqs;
-       u8 tid_trigger;
-       u8 ssid_len;
-       u8 use_ssid_list;
-
-       /* Rate bit field for sending the probes */
-       __le32 tx_rate;
-
-       u8 ssid[IEEE80211_MAX_SSID_LEN];
-       /* Band to scan */
-       u8 band;
-
-       u8 scan_tag;
-       u8 padding2[2];
-} __packed;
-
-struct basic_scan_channel_params {
-       /* Duration in TU to wait for frames on a channel for active scan */
-       __le32 min_duration;
-       __le32 max_duration;
-       __le32 bssid_lsb;
-       __le16 bssid_msb;
-       u8 early_termination;
-       u8 tx_power_att;
-       u8 channel;
-       /* FW internal use only! */
-       u8 dfs_candidate;
-       u8 activity_detected;
-       u8 pad;
-} __packed;
-
-struct wl1271_cmd_scan {
-       struct wl1271_cmd_header header;
-
-       struct basic_scan_params params;
-       struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
-
-       /* src mac address */
-       u8 addr[ETH_ALEN];
-       u8 padding[2];
-} __packed;
-
 struct wl1271_cmd_trigger_scan_to {
        struct wl1271_cmd_header header;
 
@@ -123,9 +71,17 @@ struct wl1271_cmd_trigger_scan_to {
 } __packed;
 
 #define MAX_CHANNELS_2GHZ      14
-#define MAX_CHANNELS_5GHZ      23
 #define MAX_CHANNELS_4GHZ      4
 
+/*
+ * This max value here is used only for the struct definition of
+ * wlcore_scan_channels. This struct is used by both 12xx
+ * and 18xx (which have different max 5ghz channels value).
+ * In order to make sure this is large enough, just use the
+ * max possible 5ghz channels.
+ */
+#define MAX_CHANNELS_5GHZ      42
+
 #define SCAN_MAX_CYCLE_INTERVALS 16
 #define SCAN_MAX_BANDS 3
 
@@ -160,43 +116,6 @@ struct conn_scan_ch_params {
        u8  padding[3];
 } __packed;
 
-struct wl1271_cmd_sched_scan_config {
-       struct wl1271_cmd_header header;
-
-       __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
-
-       s8 rssi_threshold; /* for filtering (in dBm) */
-       s8 snr_threshold;  /* for filtering (in dB) */
-
-       u8 cycles;       /* maximum number of scan cycles */
-       u8 report_after; /* report when this number of results are received */
-       u8 terminate;    /* stop scanning after reporting */
-
-       u8 tag;
-       u8 bss_type; /* for filtering */
-       u8 filter_type;
-
-       u8 ssid_len;     /* For SCAN_SSID_FILTER_SPECIFIC */
-       u8 ssid[IEEE80211_MAX_SSID_LEN];
-
-       u8 n_probe_reqs; /* Number of probes requests per channel */
-
-       u8 passive[SCAN_MAX_BANDS];
-       u8 active[SCAN_MAX_BANDS];
-
-       u8 dfs;
-
-       u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
-                           channels in BG band */
-       u8 role_id;
-       u8 padding[1];
-
-       struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
-       struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
-       struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
-} __packed;
-
-
 #define SCHED_SCAN_MAX_SSIDS 16
 
 enum {
@@ -220,21 +139,34 @@ struct wl1271_cmd_sched_scan_ssid_list {
        u8 padding[2];
 } __packed;
 
-struct wl1271_cmd_sched_scan_start {
-       struct wl1271_cmd_header header;
+struct wlcore_scan_channels {
+       u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */
+       u8 active[SCAN_MAX_BANDS];  /* number of active scan channels */
+       u8 dfs;            /* number of dfs channels in 5ghz */
+       u8 passive_active; /* number of passive before active channels 2.4ghz */
 
-       u8 tag;
-       u8 role_id;
-       u8 padding[2];
-} __packed;
-
-struct wl1271_cmd_sched_scan_stop {
-       struct wl1271_cmd_header header;
+       struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+       struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
+       struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+};
 
-       u8 tag;
-       u8 role_id;
-       u8 padding[2];
-} __packed;
+enum {
+       SCAN_TYPE_SEARCH        = 0,
+       SCAN_TYPE_PERIODIC      = 1,
+       SCAN_TYPE_TRACKING      = 2,
+};
 
+bool
+wlcore_set_scan_chan_params(struct wl1271 *wl,
+                           struct wlcore_scan_channels *cfg,
+                           struct ieee80211_channel *channels[],
+                           u32 n_channels,
+                           u32 n_ssids,
+                           int scan_type);
+
+int
+wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
+                                struct wl12xx_vif *wlvif,
+                                struct cfg80211_sched_scan_request *req);
 
 #endif /* __WL1271_SCAN_H__ */
index a3a20be..29ef249 100644 (file)
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
 static int wl1271_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
 {
-       struct wl12xx_platform_data *wlan_data;
+       struct wlcore_platdev_data *pdev_data;
        struct wl12xx_sdio_glue *glue;
        struct resource res[1];
        mmc_pm_flag_t mmcflags;
@@ -228,10 +228,18 @@ static int wl1271_probe(struct sdio_func *func,
        if (func->num != 0x02)
                return -ENODEV;
 
-       glue = kzalloc(sizeof(*glue), GFP_KERNEL);
-       if (!glue)
+       pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
+       if (!pdev_data)
                goto out;
 
+       pdev_data->if_ops = &sdio_ops;
+
+       glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       if (!glue) {
+               dev_err(&func->dev, "can't allocate glue\n");
+               goto out_free_pdev_data;
+       }
+
        glue->dev = &func->dev;
 
        /* Grab access to FN0 for ELP reg. */
@@ -240,9 +248,9 @@ static int wl1271_probe(struct sdio_func *func,
        /* Use block mode for transferring over one block size of data */
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-       wlan_data = wl12xx_get_platform_data();
-       if (IS_ERR(wlan_data)) {
-               ret = PTR_ERR(wlan_data);
+       pdev_data->pdata = wl12xx_get_platform_data();
+       if (IS_ERR(pdev_data->pdata)) {
+               ret = PTR_ERR(pdev_data->pdata);
                dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
                goto out_free_glue;
        }
@@ -252,9 +260,7 @@ static int wl1271_probe(struct sdio_func *func,
        dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
        if (mmcflags & MMC_PM_KEEP_POWER)
-               wlan_data->pwr_in_suspend = true;
-
-       wlan_data->ops = &sdio_ops;
+               pdev_data->pdata->pwr_in_suspend = true;
 
        sdio_set_drvdata(func, glue);
 
@@ -272,7 +278,7 @@ static int wl1271_probe(struct sdio_func *func,
        else
                chip_family = "wl12xx";
 
-       glue->core = platform_device_alloc(chip_family, -1);
+       glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO);
        if (!glue->core) {
                dev_err(glue->dev, "can't allocate platform_device");
                ret = -ENOMEM;
@@ -283,7 +289,7 @@ static int wl1271_probe(struct sdio_func *func,
 
        memset(res, 0x00, sizeof(res));
 
-       res[0].start = wlan_data->irq;
+       res[0].start = pdev_data->pdata->irq;
        res[0].flags = IORESOURCE_IRQ;
        res[0].name = "irq";
 
@@ -293,8 +299,8 @@ static int wl1271_probe(struct sdio_func *func,
                goto out_dev_put;
        }
 
-       ret = platform_device_add_data(glue->core, wlan_data,
-                                      sizeof(*wlan_data));
+       ret = platform_device_add_data(glue->core, pdev_data,
+                                      sizeof(*pdev_data));
        if (ret) {
                dev_err(glue->dev, "can't add platform data\n");
                goto out_dev_put;
@@ -313,6 +319,9 @@ out_dev_put:
 out_free_glue:
        kfree(glue);
 
+out_free_pdev_data:
+       kfree(pdev_data);
+
 out:
        return ret;
 }
@@ -324,8 +333,7 @@ static void wl1271_remove(struct sdio_func *func)
        /* Undo decrement done above in wl1271_probe */
        pm_runtime_get_noresume(&func->dev);
 
-       platform_device_del(glue->core);
-       platform_device_put(glue->core);
+       platform_device_unregister(glue->core);
        kfree(glue);
 }
 
index 18cadc0..e264478 100644 (file)
@@ -87,8 +87,11 @@ static void wl12xx_spi_reset(struct device *child)
        struct spi_message m;
 
        cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
-       if (!cmd)
+       if (!cmd) {
+               dev_err(child->parent,
+                       "could not allocate cmd for spi reset\n");
                return;
+       }
 
        memset(&t, 0, sizeof(t));
        spi_message_init(&m);
@@ -112,8 +115,11 @@ static void wl12xx_spi_init(struct device *child)
        struct spi_message m;
 
        cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
-       if (!cmd)
+       if (!cmd) {
+               dev_err(child->parent,
+                       "could not allocate cmd for spi init\n");
                return;
+       }
 
        memset(crc, 0, sizeof(crc));
        memset(&t, 0, sizeof(t));
@@ -264,7 +270,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
                                             void *buf, size_t len, bool fixed)
 {
        struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
-       struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+       struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
        struct spi_message m;
        u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
        u32 *cmd;
@@ -321,21 +327,28 @@ static struct wl1271_if_operations spi_ops = {
 static int wl1271_probe(struct spi_device *spi)
 {
        struct wl12xx_spi_glue *glue;
-       struct wl12xx_platform_data *pdata;
+       struct wlcore_platdev_data *pdev_data;
        struct resource res[1];
        int ret = -ENOMEM;
 
-       pdata = spi->dev.platform_data;
-       if (!pdata) {
+       pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
+       if (!pdev_data)
+               goto out;
+
+       pdev_data->pdata = spi->dev.platform_data;
+       if (!pdev_data->pdata) {
                dev_err(&spi->dev, "no platform data\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_free_pdev_data;
        }
 
-       pdata->ops = &spi_ops;
+       pdev_data->if_ops = &spi_ops;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
-       if (!glue)
-               goto out;
+       if (!glue) {
+               dev_err(&spi->dev, "can't allocate glue\n");
+               goto out_free_pdev_data;
+       }
 
        glue->dev = &spi->dev;
 
@@ -351,7 +364,7 @@ static int wl1271_probe(struct spi_device *spi)
                goto out_free_glue;
        }
 
-       glue->core = platform_device_alloc("wl12xx", -1);
+       glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO);
        if (!glue->core) {
                dev_err(glue->dev, "can't allocate platform_device\n");
                ret = -ENOMEM;
@@ -372,7 +385,8 @@ static int wl1271_probe(struct spi_device *spi)
                goto out_dev_put;
        }
 
-       ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+       ret = platform_device_add_data(glue->core, pdev_data,
+                                      sizeof(*pdev_data));
        if (ret) {
                dev_err(glue->dev, "can't add platform data\n");
                goto out_dev_put;
@@ -391,6 +405,10 @@ out_dev_put:
 
 out_free_glue:
        kfree(glue);
+
+out_free_pdev_data:
+       kfree(pdev_data);
+
 out:
        return ret;
 }
@@ -399,8 +417,7 @@ static int wl1271_remove(struct spi_device *spi)
 {
        struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
 
-       platform_device_del(glue->core);
-       platform_device_put(glue->core);
+       platform_device_unregister(glue->core);
        kfree(glue);
 
        return 0;
index a90d3cd..ece392c 100644 (file)
@@ -104,7 +104,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
                                    struct wl12xx_vif *wlvif,
                                    u8 hlid)
 {
-       bool fw_ps, single_sta;
+       bool fw_ps, single_link;
        u8 tx_pkts;
 
        if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
@@ -112,15 +112,15 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
 
        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        tx_pkts = wl->links[hlid].allocated_pkts;
-       single_sta = (wl->active_sta_count == 1);
+       single_link = (wl->active_link_count == 1);
 
        /*
         * if in FW PS and there is enough data in FW we can put the link
         * into high-level PS and clean out its TX queues.
-        * Make an exception if this is the only connected station. In this
-        * case FW-memory congestion is not a problem.
+        * Make an exception if this is the only connected link. In this
+        * case FW-memory congestion is less of a problem.
         */
-       if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+       if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
@@ -155,21 +155,18 @@ static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                      struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
-       if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
-               return wl->system_hlid;
+       struct ieee80211_tx_info *control;
 
        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
                return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
 
-       if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
-            test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
-           !ieee80211_is_auth(hdr->frame_control) &&
-           !ieee80211_is_assoc_req(hdr->frame_control))
-               return wlvif->sta.hlid;
-       else
+       control = IEEE80211_SKB_CB(skb);
+       if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+               wl1271_debug(DEBUG_TX, "tx offchannel");
                return wlvif->dev_hlid;
+       }
+
+       return wlvif->sta.hlid;
 }
 
 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
@@ -224,9 +221,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                wl->tx_allocated_pkts[ac]++;
 
-               if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
-                   wlvif->bss_type == BSS_TYPE_AP_BSS &&
-                   test_bit(hlid, wlvif->ap.sta_hlid_map))
+               if (test_bit(hlid, wl->links_map))
                        wl->links[hlid].allocated_pkts++;
 
                ret = 0;
@@ -293,9 +288,14 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
                tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
        } else if (wlvif) {
+               u8 session_id = wl->session_ids[hlid];
+
+               if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
+                   (wlvif->bss_type == BSS_TYPE_AP_BSS))
+                       session_id = 0;
+
                /* configure the tx attributes */
-               tx_attr = wlvif->session_counter <<
-                         TX_HW_ATTR_OFST_SESSION_COUNTER;
+               tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
        }
 
        desc->hlid = hlid;
@@ -452,20 +452,22 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
 {
        int i;
+       struct wl12xx_vif *wlvif;
 
-       for (i = 0; i < NUM_TX_QUEUES; i++) {
-               if (wlcore_is_queue_stopped_by_reason(wl, i,
-                       WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
-                   wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
-                       /* firmware buffer has space, restart queues */
-                       wlcore_wake_queue(wl, i,
-                                         WLCORE_QUEUE_STOP_REASON_WATERMARK);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               for (i = 0; i < NUM_TX_QUEUES; i++) {
+                       if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
+                                       WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
+                           wlvif->tx_queue_count[i] <=
+                                       WL1271_TX_QUEUE_LOW_WATERMARK)
+                               /* firmware buffer has space, restart queues */
+                               wlcore_wake_queue(wl, wlvif, i,
+                                       WLCORE_QUEUE_STOP_REASON_WATERMARK);
                }
        }
 }
 
-static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
-                                               struct sk_buff_head *queues)
+static int wlcore_select_ac(struct wl1271 *wl)
 {
        int i, q = -1, ac;
        u32 min_pkts = 0xffffffff;
@@ -479,45 +481,60 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
         */
        for (i = 0; i < NUM_TX_QUEUES; i++) {
                ac = wl1271_tx_get_queue(i);
-               if (!skb_queue_empty(&queues[ac]) &&
-                   (wl->tx_allocated_pkts[ac] < min_pkts)) {
+               if (wl->tx_queue_count[ac] &&
+                   wl->tx_allocated_pkts[ac] < min_pkts) {
                        q = ac;
                        min_pkts = wl->tx_allocated_pkts[q];
                }
        }
 
-       if (q == -1)
-               return NULL;
-
-       return &queues[q];
+       return q;
 }
 
-static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
-                                             struct wl1271_link *lnk)
+static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
+                                         struct wl1271_link *lnk, u8 q)
 {
        struct sk_buff *skb;
        unsigned long flags;
-       struct sk_buff_head *queue;
 
-       queue = wl1271_select_queue(wl, lnk->tx_queue);
-       if (!queue)
-               return NULL;
-
-       skb = skb_dequeue(queue);
+       skb = skb_dequeue(&lnk->tx_queue[q]);
        if (skb) {
-               int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
                WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
                wl->tx_queue_count[q]--;
+               if (lnk->wlvif) {
+                       WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
+                       lnk->wlvif->tx_queue_count[q]--;
+               }
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
 
        return skb;
 }
 
-static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
-                                             struct wl12xx_vif *wlvif,
-                                             u8 *hlid)
+static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
+                                                   u8 hlid, u8 ac,
+                                                   u8 *low_prio_hlid)
+{
+       struct wl1271_link *lnk = &wl->links[hlid];
+
+       if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
+               if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
+                   !skb_queue_empty(&lnk->tx_queue[ac]) &&
+                   wlcore_hw_lnk_low_prio(wl, hlid, lnk))
+                       /* we found the first non-empty low priority queue */
+                       *low_prio_hlid = hlid;
+
+               return NULL;
+       }
+
+       return wlcore_lnk_dequeue(wl, lnk, ac);
+}
+
+static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
+                                                   struct wl12xx_vif *wlvif,
+                                                   u8 ac, u8 *hlid,
+                                                   u8 *low_prio_hlid)
 {
        struct sk_buff *skb = NULL;
        int i, h, start_hlid;
@@ -533,7 +550,8 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
                if (!test_bit(h, wlvif->links_map))
                        continue;
 
-               skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+               skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
+                                                  low_prio_hlid);
                if (!skb)
                        continue;
 
@@ -553,42 +571,74 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
        unsigned long flags;
        struct wl12xx_vif *wlvif = wl->last_wlvif;
        struct sk_buff *skb = NULL;
+       int ac;
+       u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
+
+       ac = wlcore_select_ac(wl);
+       if (ac < 0)
+               goto out;
 
        /* continue from last wlvif (round robin) */
        if (wlvif) {
                wl12xx_for_each_wlvif_continue(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
-                       if (skb) {
-                               wl->last_wlvif = wlvif;
-                               break;
-                       }
+                       if (!wlvif->tx_queue_count[ac])
+                               continue;
+
+                       skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+                                                          &low_prio_hlid);
+                       if (!skb)
+                               continue;
+
+                       wl->last_wlvif = wlvif;
+                       break;
                }
        }
 
        /* dequeue from the system HLID before the restarting wlvif list */
        if (!skb) {
-               skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
-               *hlid = wl->system_hlid;
+               skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
+                                                  ac, &low_prio_hlid);
+               if (skb) {
+                       *hlid = wl->system_hlid;
+                       wl->last_wlvif = NULL;
+               }
        }
 
-       /* do a new pass over the wlvif list */
+       /* Do a new pass over the wlvif list. But no need to continue
+        * after last_wlvif. The previous pass should have found it. */
        if (!skb) {
                wl12xx_for_each_wlvif(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
+                       if (!wlvif->tx_queue_count[ac])
+                               goto next;
+
+                       skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+                                                          &low_prio_hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
                        }
 
-                       /*
-                        * No need to continue after last_wlvif. The previous
-                        * pass should have found it.
-                        */
+next:
                        if (wlvif == wl->last_wlvif)
                                break;
                }
        }
 
+       /* no high priority skbs found - but maybe a low priority one? */
+       if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
+               struct wl1271_link *lnk = &wl->links[low_prio_hlid];
+               skb = wlcore_lnk_dequeue(wl, lnk, ac);
+
+               WARN_ON(!skb); /* we checked this before */
+               *hlid = low_prio_hlid;
+
+               /* ensure proper round robin in the vif/link levels */
+               wl->last_wlvif = lnk->wlvif;
+               if (lnk->wlvif)
+                       lnk->wlvif->last_tx_hlid = low_prio_hlid;
+
+       }
+
        if (!skb &&
            test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
                int q;
@@ -602,6 +652,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
 
+out:
        return skb;
 }
 
@@ -623,6 +674,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
        spin_lock_irqsave(&wl->wl_lock, flags);
        wl->tx_queue_count[q]++;
+       if (wlvif)
+               wlvif->tx_queue_count[q]++;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
 
@@ -699,7 +752,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                bool has_data = false;
 
                wlvif = NULL;
-               if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+               if (!wl12xx_is_dummy_packet(wl, skb))
                        wlvif = wl12xx_vif_to_data(info->control.vif);
                else
                        hlid = wl->system_hlid;
@@ -972,10 +1025,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
        unsigned long flags;
        struct ieee80211_tx_info *info;
        int total[NUM_TX_QUEUES];
+       struct wl1271_link *lnk = &wl->links[hlid];
 
        for (i = 0; i < NUM_TX_QUEUES; i++) {
                total[i] = 0;
-               while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+               while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
                        wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
 
                        if (!wl12xx_is_dummy_packet(wl, skb)) {
@@ -990,8 +1044,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
        }
 
        spin_lock_irqsave(&wl->wl_lock, flags);
-       for (i = 0; i < NUM_TX_QUEUES; i++)
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
                wl->tx_queue_count[i] -= total[i];
+               if (lnk->wlvif)
+                       lnk->wlvif->tx_queue_count[i] -= total[i];
+       }
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        wl1271_handle_tx_low_watermark(wl);
@@ -1004,16 +1061,18 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 
        /* TX failure */
        for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
-               if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+                       /* this calls wl12xx_free_link */
                        wl1271_free_sta(wl, wlvif, i);
-               else
-                       wlvif->sta.ba_rx_bitmap = 0;
-
-               wl->links[i].allocated_pkts = 0;
-               wl->links[i].prev_freed_pkts = 0;
+               } else {
+                       u8 hlid = i;
+                       wl12xx_free_link(wl, wlvif, &hlid);
+               }
        }
        wlvif->last_tx_hlid = 0;
 
+       for (i = 0; i < NUM_TX_QUEUES; i++)
+               wlvif->tx_queue_count[i] = 0;
 }
 /* caller must hold wl->mutex and TX must be stopped */
 void wl12xx_tx_reset(struct wl1271 *wl)
@@ -1023,7 +1082,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
        struct ieee80211_tx_info *info;
 
        /* only reset the queues if something bad happened */
-       if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
+       if (wl1271_tx_total_queue_count(wl) != 0) {
                for (i = 0; i < WL12XX_MAX_LINKS; i++)
                        wl1271_tx_reset_link_queues(wl, i);
 
@@ -1135,45 +1194,48 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
 
        return BIT(__ffs(rate_set));
 }
+EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
 
-void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
-                             enum wlcore_queue_stop_reason reason)
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             u8 queue, enum wlcore_queue_stop_reason reason)
 {
-       bool stopped = !!wl->queue_stop_reasons[queue];
+       int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+       bool stopped = !!wl->queue_stop_reasons[hwq];
 
        /* queue should not be stopped for this reason */
-       WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
+       WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
 
        if (stopped)
                return;
 
-       ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+       ieee80211_stop_queue(wl->hw, hwq);
 }
 
-void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
                       enum wlcore_queue_stop_reason reason)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&wl->wl_lock, flags);
-       wlcore_stop_queue_locked(wl, queue, reason);
+       wlcore_stop_queue_locked(wl, wlvif, queue, reason);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
 
-void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
                       enum wlcore_queue_stop_reason reason)
 {
        unsigned long flags;
+       int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
 
        spin_lock_irqsave(&wl->wl_lock, flags);
 
        /* queue should not be clear for this reason */
-       WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
+       WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
 
-       if (wl->queue_stop_reasons[queue])
+       if (wl->queue_stop_reasons[hwq])
                goto out;
 
-       ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+       ieee80211_wake_queue(wl->hw, hwq);
 
 out:
        spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -1183,48 +1245,74 @@ void wlcore_stop_queues(struct wl1271 *wl,
                        enum wlcore_queue_stop_reason reason)
 {
        int i;
+       unsigned long flags;
 
-       for (i = 0; i < NUM_TX_QUEUES; i++)
-               wlcore_stop_queue(wl, i, reason);
+       spin_lock_irqsave(&wl->wl_lock, flags);
+
+       /* mark all possible queues as stopped */
+        for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+                WARN_ON_ONCE(test_and_set_bit(reason,
+                                             &wl->queue_stop_reasons[i]));
+
+       /* use the global version to make sure all vifs in mac80211 we don't
+        * know are stopped.
+        */
+       ieee80211_stop_queues(wl->hw);
+
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
-EXPORT_SYMBOL_GPL(wlcore_stop_queues);
 
 void wlcore_wake_queues(struct wl1271 *wl,
                        enum wlcore_queue_stop_reason reason)
 {
        int i;
+       unsigned long flags;
 
-       for (i = 0; i < NUM_TX_QUEUES; i++)
-               wlcore_wake_queue(wl, i, reason);
+       spin_lock_irqsave(&wl->wl_lock, flags);
+
+       /* mark all possible queues as awake */
+        for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+               WARN_ON_ONCE(!test_and_clear_bit(reason,
+                                                &wl->queue_stop_reasons[i]));
+
+       /* use the global version to make sure all vifs in mac80211 we don't
+        * know are woken up.
+        */
+       ieee80211_wake_queues(wl->hw);
+
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
-EXPORT_SYMBOL_GPL(wlcore_wake_queues);
 
-void wlcore_reset_stopped_queues(struct wl1271 *wl)
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif, u8 queue,
+                                      enum wlcore_queue_stop_reason reason)
 {
-       int i;
        unsigned long flags;
+       bool stopped;
 
        spin_lock_irqsave(&wl->wl_lock, flags);
-
-       for (i = 0; i < NUM_TX_QUEUES; i++) {
-               if (!wl->queue_stop_reasons[i])
-                       continue;
-
-               wl->queue_stop_reasons[i] = 0;
-               ieee80211_wake_queue(wl->hw,
-                                    wl1271_tx_get_mac80211_queue(i));
-       }
-
+       stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
+                                                          reason);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       return stopped;
 }
 
-bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
-                            enum wlcore_queue_stop_reason reason)
+bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif, u8 queue,
+                                      enum wlcore_queue_stop_reason reason)
 {
-       return test_bit(reason, &wl->queue_stop_reasons[queue]);
+       int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+       WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
+       return test_bit(reason, &wl->queue_stop_reasons[hwq]);
 }
 
-bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                   u8 queue)
 {
-       return !!wl->queue_stop_reasons[queue];
+       int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+       WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
+       return !!wl->queue_stop_reasons[hwq];
 }
index 349520d..55aa4ac 100644 (file)
@@ -207,19 +207,22 @@ static inline int wl1271_tx_get_queue(int queue)
        }
 }
 
-static inline int wl1271_tx_get_mac80211_queue(int queue)
+static inline
+int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue)
 {
+       int mac_queue = wlvif->hw_queue_base;
+
        switch (queue) {
        case CONF_TX_AC_VO:
-               return 0;
+               return mac_queue + 0;
        case CONF_TX_AC_VI:
-               return 1;
+               return mac_queue + 1;
        case CONF_TX_AC_BE:
-               return 2;
+               return mac_queue + 2;
        case CONF_TX_AC_BK:
-               return 3;
+               return mac_queue + 3;
        default:
-               return 2;
+               return mac_queue + 2;
        }
 }
 
@@ -252,20 +255,26 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
                                          unsigned int packet_length);
 void wl1271_free_tx_id(struct wl1271 *wl, int id);
-void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
-                             enum wlcore_queue_stop_reason reason);
-void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             u8 queue, enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
                       enum wlcore_queue_stop_reason reason);
-void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
                       enum wlcore_queue_stop_reason reason);
 void wlcore_stop_queues(struct wl1271 *wl,
                        enum wlcore_queue_stop_reason reason);
 void wlcore_wake_queues(struct wl1271 *wl,
                        enum wlcore_queue_stop_reason reason);
-void wlcore_reset_stopped_queues(struct wl1271 *wl);
-bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif, u8 queue,
                                       enum wlcore_queue_stop_reason reason);
-bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
+bool
+wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+                                        struct wl12xx_vif *wlvif,
+                                        u8 queue,
+                                        enum wlcore_queue_stop_reason reason);
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                   u8 queue);
 
 /* from main.c */
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
index c388493..af9feca 100644 (file)
@@ -37,6 +37,9 @@
  */
 #define WLCORE_NUM_MAC_ADDRESSES 3
 
+/* wl12xx/wl18xx maximum transmission power (in dBm) */
+#define WLCORE_MAX_TXPWR        25
+
 /* forward declaration */
 struct wl1271_tx_hw_descr;
 enum wl_rx_buf_align;
@@ -51,6 +54,9 @@ struct wlcore_ops {
        int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
                           void *buf, size_t len);
        int (*ack_event)(struct wl1271 *wl);
+       int (*wait_for_event)(struct wl1271 *wl, enum wlcore_wait_event event,
+                             bool *timeout);
+       int (*process_mailbox_events)(struct wl1271 *wl);
        u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
        void (*set_tx_desc_blocks)(struct wl1271 *wl,
                                   struct wl1271_tx_hw_descr *desc,
@@ -82,12 +88,32 @@ struct wlcore_ops {
        int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
        int (*handle_static_data)(struct wl1271 *wl,
                                  struct wl1271_static_data *static_data);
+       int (*scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         struct cfg80211_scan_request *req);
+       int (*scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+       int (*sched_scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               struct cfg80211_sched_scan_request *req,
+                               struct ieee80211_sched_scan_ies *ies);
+       void (*sched_scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
        int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
        int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta,
                       struct ieee80211_key_conf *key_conf);
+       int (*channel_switch)(struct wl1271 *wl,
+                             struct wl12xx_vif *wlvif,
+                             struct ieee80211_channel_switch *ch_switch);
        u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
+       void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             struct ieee80211_sta *sta, u32 changed);
+       int (*set_peer_cap)(struct wl1271 *wl,
+                           struct ieee80211_sta_ht_cap *ht_cap,
+                           bool allow_ht_operation,
+                           u32 rate_set, u8 hlid);
+       bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
+                             struct wl1271_link *lnk);
+       bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
+                            struct wl1271_link *lnk);
 };
 
 enum wlcore_partitions {
@@ -157,7 +183,6 @@ struct wl1271 {
 
        struct wl1271_if_operations *if_ops;
 
-       void (*set_power)(bool enable);
        int irq;
 
        spinlock_t wl_lock;
@@ -202,6 +227,8 @@ struct wl1271 {
        unsigned long klv_templates_map[
                        BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
 
+       u8 session_ids[WL12XX_MAX_LINKS];
+
        struct list_head wlvif_list;
 
        u8 sta_count;
@@ -227,7 +254,8 @@ struct wl1271 {
 
        /* Frames scheduled for transmission, not handled yet */
        int tx_queue_count[NUM_TX_QUEUES];
-       unsigned long queue_stop_reasons[NUM_TX_QUEUES];
+       unsigned long queue_stop_reasons[
+                               NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES];
 
        /* Frames received, not handled yet by mac80211 */
        struct sk_buff_head deferred_rx_queue;
@@ -269,24 +297,30 @@ struct wl1271 {
        struct work_struct recovery_work;
        bool watchdog_recovery;
 
+       /* Reg domain last configuration */
+       u32 reg_ch_conf_last[2];
+       /* Reg domain pending configuration */
+       u32 reg_ch_conf_pending[2];
+
        /* Pointer that holds DMA-friendly block for the mailbox */
-       struct event_mailbox *mbox;
+       void *mbox;
 
        /* The mbox event mask */
        u32 event_mask;
 
        /* Mailbox pointers */
+       u32 mbox_size;
        u32 mbox_ptr[2];
 
        /* Are we currently scanning */
-       struct ieee80211_vif *scan_vif;
+       struct wl12xx_vif *scan_wlvif;
        struct wl1271_scan scan;
        struct delayed_work scan_complete_work;
 
-       /* Connection loss work */
-       struct delayed_work connection_loss_work;
+       struct ieee80211_vif *roc_vif;
+       struct delayed_work roc_complete_work;
 
-       bool sched_scanning;
+       struct wl12xx_vif *sched_vif;
 
        /* The current band */
        enum ieee80211_band band;
@@ -299,7 +333,7 @@ struct wl1271 {
 
        struct wl1271_stats stats;
 
-       __le32 buffer_32;
+       __le32 *buffer_32;
        u32 buffer_cmd;
        u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 
@@ -314,6 +348,8 @@ struct wl1271 {
 
        bool enable_11a;
 
+       int recovery_count;
+
        /* Most recently reported noise in dBm */
        s8 noise;
 
@@ -333,6 +369,12 @@ struct wl1271 {
         */
        struct wl1271_link links[WL12XX_MAX_LINKS];
 
+       /* number of currently active links */
+       int active_link_count;
+
+       /* Fast/slow links bitmap according to FW */
+       u32 fw_fast_lnk_map;
+
        /* AP-mode - a bitmap of links currently in PS mode according to FW */
        u32 ap_fw_ps_map;
 
@@ -367,6 +409,12 @@ struct wl1271 {
        const char *sr_fw_name;
        const char *mr_fw_name;
 
+       u8 scan_templ_id_2_4;
+       u8 scan_templ_id_5;
+       u8 sched_scan_templ_id_2_4;
+       u8 sched_scan_templ_id_5;
+       u8 max_channels_5;
+
        /* per-chip-family private structure */
        void *priv;
 
@@ -408,20 +456,28 @@ struct wl1271 {
        /* the number of allocated MAC addresses in this chip */
        int num_mac_addr;
 
-       /* the minimum FW version required for the driver to work */
-       unsigned int min_fw_ver[NUM_FW_VER];
+       /* minimum FW version required for the driver to work in single-role */
+       unsigned int min_sr_fw_ver[NUM_FW_VER];
+
+       /* minimum FW version required for the driver to work in multi-role */
+       unsigned int min_mr_fw_ver[NUM_FW_VER];
 
        struct completion nvs_loading_complete;
+
+       /* number of concurrent channels the HW supports */
+       u32 num_channels;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
 int wlcore_remove(struct platform_device *pdev);
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
+                                    u32 mbox_size);
 int wlcore_free_hw(struct wl1271 *wl);
 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                   struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta,
                   struct ieee80211_key_conf *key_conf);
+void wlcore_regdomain_config(struct wl1271 *wl);
 
 static inline void
 wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
@@ -430,16 +486,27 @@ wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
        memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
 }
 
+/* Tell wlcore not to care about this element when checking the version */
+#define WLCORE_FW_VER_IGNORE   -1
+
 static inline void
 wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
-                     unsigned int iftype, unsigned int major,
-                     unsigned int subtype, unsigned int minor)
+                     unsigned int iftype_sr, unsigned int major_sr,
+                     unsigned int subtype_sr, unsigned int minor_sr,
+                     unsigned int iftype_mr, unsigned int major_mr,
+                     unsigned int subtype_mr, unsigned int minor_mr)
 {
-       wl->min_fw_ver[FW_VER_CHIP] = chip;
-       wl->min_fw_ver[FW_VER_IF_TYPE] = iftype;
-       wl->min_fw_ver[FW_VER_MAJOR] = major;
-       wl->min_fw_ver[FW_VER_SUBTYPE] = subtype;
-       wl->min_fw_ver[FW_VER_MINOR] = minor;
+       wl->min_sr_fw_ver[FW_VER_CHIP] = chip;
+       wl->min_sr_fw_ver[FW_VER_IF_TYPE] = iftype_sr;
+       wl->min_sr_fw_ver[FW_VER_MAJOR] = major_sr;
+       wl->min_sr_fw_ver[FW_VER_SUBTYPE] = subtype_sr;
+       wl->min_sr_fw_ver[FW_VER_MINOR] = minor_sr;
+
+       wl->min_mr_fw_ver[FW_VER_CHIP] = chip;
+       wl->min_mr_fw_ver[FW_VER_IF_TYPE] = iftype_mr;
+       wl->min_mr_fw_ver[FW_VER_MAJOR] = major_mr;
+       wl->min_mr_fw_ver[FW_VER_SUBTYPE] = subtype_mr;
+       wl->min_mr_fw_ver[FW_VER_MINOR] = minor_mr;
 }
 
 /* Firmware image load chunk size */
@@ -450,6 +517,9 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
 /* Each RX/TX transaction requires an end-of-transaction transfer */
 #define WLCORE_QUIRK_END_OF_TRANSACTION                BIT(0)
 
+/* the first start_role(sta) sometimes doesn't work on wl12xx */
+#define WLCORE_QUIRK_START_STA_FAILS           BIT(1)
+
 /* wl127x and SPI don't support SDIO block size alignment */
 #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN                BIT(2)
 
@@ -462,9 +532,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
 /* Older firmwares use an old NVS format */
 #define WLCORE_QUIRK_LEGACY_NVS                        BIT(5)
 
-/* Some firmwares may not support ELP */
-#define WLCORE_QUIRK_NO_ELP                    BIT(6)
-
 /* pad only the last frame in the aggregate buffer */
 #define WLCORE_QUIRK_TX_PAD_LAST_FRAME         BIT(7)
 
@@ -477,11 +544,11 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
 /* separate probe response templates for one-shot and sched scans */
 #define WLCORE_QUIRK_DUAL_PROBE_TMPL           BIT(10)
 
-/* TODO: move to the lower drivers when all usages are abstracted */
-#define CHIP_ID_1271_PG10              (0x4030101)
-#define CHIP_ID_1271_PG20              (0x4030111)
-#define CHIP_ID_1283_PG10              (0x05030101)
-#define CHIP_ID_1283_PG20              (0x05030111)
+/* Firmware requires reg domain configuration for active calibration */
+#define WLCORE_QUIRK_REGDOMAIN_CONF            BIT(11)
+
+/* The FW only support a zero session id for AP */
+#define WLCORE_QUIRK_AP_ZERO_SESSION_ID                BIT(12)
 
 /* TODO: move all these common registers and values elsewhere */
 #define HW_ACCESS_ELP_CTRL_REG         0x1FFFC
index 5ce26cf..508f5b0 100644 (file)
@@ -109,17 +109,6 @@ enum {
        NUM_FW_VER
 };
 
-#define FW_VER_CHIP_WL127X 6
-#define FW_VER_CHIP_WL128X 7
-
-#define FW_VER_IF_TYPE_STA 1
-#define FW_VER_IF_TYPE_AP  2
-
-#define FW_VER_MINOR_1_SPARE_STA_MIN 58
-#define FW_VER_MINOR_1_SPARE_AP_MIN  47
-
-#define FW_VER_MINOR_FWLOG_STA_MIN 70
-
 struct wl1271_chip {
        u32 id;
        char fw_ver_str[ETHTOOL_FWVERS_LEN];
@@ -141,7 +130,10 @@ struct wl_fw_packet_counters {
        /* Cumulative counter of released Voice memory blocks */
        u8 tx_voice_released_blks;
 
-       u8 padding[3];
+       /* Tx rate of the last transmitted packet */
+       u8 tx_last_rate;
+
+       u8 padding[2];
 } __packed;
 
 /* FW status registers */
@@ -214,6 +206,11 @@ struct wl1271_if_operations {
        void (*set_block_size) (struct device *child, unsigned int blksz);
 };
 
+struct wlcore_platdev_data {
+       struct wl12xx_platform_data *pdata;
+       struct wl1271_if_operations *if_ops;
+};
+
 #define MAX_NUM_KEYS 14
 #define MAX_KEY_SIZE 32
 
@@ -260,6 +257,8 @@ enum wl12xx_vif_flags {
        WLVIF_FLAG_IN_USE,
 };
 
+struct wl12xx_vif;
+
 struct wl1271_link {
        /* AP-mode - TX queue per AC in link */
        struct sk_buff_head tx_queue[NUM_TX_QUEUES];
@@ -272,6 +271,9 @@ struct wl1271_link {
 
        /* bitmap of TIDs where RX BA sessions are active for this link */
        u8 ba_bitmap;
+
+       /* The wlvif this link belongs to. Might be null for global links */
+       struct wl12xx_vif *wlvif;
 };
 
 #define WL1271_MAX_RX_FILTERS 5
@@ -315,6 +317,7 @@ struct wl12xx_rx_filter {
 
 struct wl1271_station {
        u8 hlid;
+       bool in_connection;
 };
 
 struct wl12xx_vif {
@@ -332,7 +335,6 @@ struct wl12xx_vif {
        union {
                struct {
                        u8 hlid;
-                       u8 ba_rx_bitmap;
 
                        u8 basic_rate_idx;
                        u8 ap_rate_idx;
@@ -341,6 +343,8 @@ struct wl12xx_vif {
                        u8 klv_template_id;
 
                        bool qos;
+                       /* channel type we started the STA role with */
+                       enum nl80211_channel_type role_chan_type;
                } sta;
                struct {
                        u8 global_hlid;
@@ -362,6 +366,9 @@ struct wl12xx_vif {
        /* the hlid of the last transmitted skb */
        int last_tx_hlid;
 
+       /* counters of packets per AC, across all links in the vif */
+       int tx_queue_count[NUM_TX_QUEUES];
+
        unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
 
        u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
@@ -396,9 +403,6 @@ struct wl12xx_vif {
        /* Our association ID */
        u16 aid;
 
-       /* Session counter for the chipset */
-       int session_counter;
-
        /* retry counter for PSM entries */
        u8 psm_entry_retry;
 
@@ -416,11 +420,28 @@ struct wl12xx_vif {
        bool ba_support;
        bool ba_allowed;
 
+       bool wmm_enabled;
+
        /* Rx Streaming */
        struct work_struct rx_streaming_enable_work;
        struct work_struct rx_streaming_disable_work;
        struct timer_list rx_streaming_timer;
 
+       struct delayed_work channel_switch_work;
+       struct delayed_work connection_loss_work;
+
+       /* number of in connection stations */
+       int inconn_count;
+
+       /*
+        * This vif's queues are mapped to mac80211 HW queues as:
+        * VO - hw_queue_base
+        * VI - hw_queue_base + 1
+        * BE - hw_queue_base + 2
+        * BK - hw_queue_base + 3
+        */
+       int hw_queue_base;
+
        /*
         * This struct must be last!
         * data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -443,6 +464,7 @@ struct wl12xx_vif {
 
 static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
 {
+       WARN_ON(!vif);
        return (struct wl12xx_vif *)vif->drv_priv;
 }
 
index 94b79c3..9d7f172 100644 (file)
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
index f733cae..d984141 100644 (file)
@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
        disable_irq(vif->irq);
+       del_timer_sync(&vif->credit_timeout);
        xen_netbk_deschedule_xenvif(vif);
        xen_netbk_remove_xenvif(vif);
 }
@@ -345,23 +346,26 @@ err:
        return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
        struct net_device *dev = vif->dev;
-       if (netif_carrier_ok(dev)) {
-               rtnl_lock();
-               netif_carrier_off(dev); /* discard queued packets */
-               if (netif_running(dev))
-                       xenvif_down(vif);
-               rtnl_unlock();
-               xenvif_put(vif);
-       }
+
+       rtnl_lock();
+       netif_carrier_off(dev); /* discard queued packets */
+       if (netif_running(dev))
+               xenvif_down(vif);
+       rtnl_unlock();
+       xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+       if (netif_carrier_ok(vif->dev))
+               xenvif_carrier_off(vif);
 
        atomic_dec(&vif->refcnt);
        wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
 
-       del_timer_sync(&vif->credit_timeout);
-
        if (vif->irq)
                unbind_from_irqhandler(vif->irq, vif);
 
index f2d6b78..cd49ba9 100644 (file)
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
        atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status);
 static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8       st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
        do {
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               if (cons >= end)
+               if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
        } while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
        xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+       netdev_err(vif->dev, "fatal error; disabling device\n");
+       xenvif_carrier_off(vif);
+       xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
                                struct xen_netif_tx_request *first,
                                struct xen_netif_tx_request *txp,
@@ -901,29 +909,33 @@ static int netbk_count_requests(struct xenvif *vif,
 
        do {
                if (frags >= work_to_do) {
-                       netdev_dbg(vif->dev, "Need more frags\n");
-                       return -frags;
+                       netdev_err(vif->dev, "Need more frags\n");
+                       netbk_fatal_tx_err(vif);
+                       return -ENODATA;
                }
 
                if (unlikely(frags >= MAX_SKB_FRAGS)) {
-                       netdev_dbg(vif->dev, "Too many frags\n");
-                       return -frags;
+                       netdev_err(vif->dev, "Too many frags\n");
+                       netbk_fatal_tx_err(vif);
+                       return -E2BIG;
                }
 
                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
                       sizeof(*txp));
                if (txp->size > first->size) {
-                       netdev_dbg(vif->dev, "Frags galore\n");
-                       return -frags;
+                       netdev_err(vif->dev, "Frag is bigger than frame.\n");
+                       netbk_fatal_tx_err(vif);
+                       return -EIO;
                }
 
                first->size -= txp->size;
                frags++;
 
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+                       netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
                                 txp->offset, txp->size);
-                       return -frags;
+                       netbk_fatal_tx_err(vif);
+                       return -EINVAL;
                }
        } while ((txp++)->flags & XEN_NETTXF_more_data);
        return frags;
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                pending_idx = netbk->pending_ring[index];
                page = xen_netbk_alloc_page(netbk, skb, pending_idx);
                if (!page)
-                       return NULL;
+                       goto err;
 
                gop->source.u.ref = txp->gref;
                gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
        }
 
        return gop;
+err:
+       /* Unwind, freeing all pages and sending error responses. */
+       while (i-- > start) {
+               xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+                                     XEN_NETIF_RSP_ERROR);
+       }
+       /* The head too, if necessary. */
+       if (start)
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+       return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
        struct gnttab_copy *gop = *gopp;
        u16 pending_idx = *((u16 *)skb->data);
-       struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-       struct xenvif *vif = pending_tx_info[pending_idx].vif;
-       struct xen_netif_tx_request *txp;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
 
        /* Check status of header. */
        err = gop->status;
-       if (unlikely(err)) {
-               pending_ring_idx_t index;
-               index = pending_index(netbk->pending_prod++);
-               txp = &pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
-       }
+       if (unlikely(err))
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
        for (i = start; i < nr_frags; i++) {
                int j, newerr;
-               pending_ring_idx_t index;
 
                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xen_netbk_idx_release(netbk, pending_idx);
+                               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                        continue;
                }
 
                /* Error on this fragment: respond to client with an error. */
-               txp = &netbk->pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               index = pending_index(netbk->pending_prod++);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
                /* Take an extra reference to offset xen_netbk_idx_release */
                get_page(netbk->mmap_pages[pending_idx]);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
        }
 }
 
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
        do {
                if (unlikely(work_to_do-- <= 0)) {
-                       netdev_dbg(vif->dev, "Missing extra info\n");
+                       netdev_err(vif->dev, "Missing extra info\n");
+                       netbk_fatal_tx_err(vif);
                        return -EBADR;
                }
 
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        vif->tx.req_cons = ++cons;
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
+                       netbk_fatal_tx_err(vif);
                        return -EINVAL;
                }
 
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
                             struct xen_netif_extra_info *gso)
 {
        if (!gso->u.gso.size) {
-               netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+               netdev_err(vif->dev, "GSO size must not be zero.\n");
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-               netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* Get a netif from the list with work to do. */
                vif = poll_net_schedule_list(netbk);
+               /* This can sometimes happen because the test of
+                * list_empty(net_schedule_list) at the top of the
+                * loop is unlocked.  Just go back and have another
+                * look.
+                */
                if (!vif)
                        continue;
 
+               if (vif->tx.sring->req_prod - vif->tx.req_cons >
+                   XEN_NETIF_TX_RING_SIZE) {
+                       netdev_err(vif->dev,
+                                  "Impossible number of requests. "
+                                  "req_prod %d, req_cons %d, size %ld\n",
+                                  vif->tx.sring->req_prod, vif->tx.req_cons,
+                                  XEN_NETIF_TX_RING_SIZE);
+                       netbk_fatal_tx_err(vif);
+                       continue;
+               }
+
                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
                if (!work_to_do) {
                        xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        work_to_do = xen_netbk_get_extras(vif, extras,
                                                          work_to_do);
                        idx = vif->tx.req_cons;
-                       if (unlikely(work_to_do < 0)) {
-                               netbk_tx_err(vif, &txreq, idx);
+                       if (unlikely(work_to_do < 0))
                                continue;
-                       }
                }
 
                ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-               if (unlikely(ret < 0)) {
-                       netbk_tx_err(vif, &txreq, idx - ret);
+               if (unlikely(ret < 0))
                        continue;
-               }
+
                idx += ret;
 
                if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
-                       netbk_tx_err(vif, &txreq, idx);
+                       netbk_fatal_tx_err(vif);
                        continue;
                }
 
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
                        if (netbk_set_skb_gso(vif, skb, gso)) {
+                               /* Failure in netbk_set_skb_gso is fatal. */
                                kfree_skb(skb);
-                               netbk_tx_err(vif, &txreq, idx);
                                continue;
                        }
                }
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
        xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status)
 {
        struct xenvif *vif;
        struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
        vif = pending_tx_info->vif;
 
-       make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+       make_tx_response(vif, &pending_tx_info->req, status);
 
        index = pending_index(netbk->pending_prod++);
        netbk->pending_ring[index] = pending_idx;
index 80c728b..e570349 100644 (file)
@@ -27,5 +27,6 @@ config NFC_WILINK
          into the kernel or say M to compile it as module.
 
 source "drivers/nfc/pn544/Kconfig"
+source "drivers/nfc/microread/Kconfig"
 
 endmenu
index 574bbc0..a189ada 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_NFC_PN544)                += pn544/
+obj-$(CONFIG_NFC_MICROREAD)    += microread/
 obj-$(CONFIG_NFC_PN533)                += pn533.o
 obj-$(CONFIG_NFC_WILINK)       += nfcwilink.o
 
diff --git a/drivers/nfc/microread/Kconfig b/drivers/nfc/microread/Kconfig
new file mode 100644 (file)
index 0000000..572305b
--- /dev/null
@@ -0,0 +1,35 @@
+config NFC_MICROREAD
+       tristate "Inside Secure microread NFC driver"
+       depends on NFC_HCI
+       select CRC_CCITT
+       default n
+       ---help---
+         This module contains the main code for Inside Secure microread
+         NFC chipsets. It implements the chipset HCI logic and hooks into
+         the NFC kernel APIs. Physical layers will register against it.
+
+         To compile this driver as a module, choose m here. The module will
+         be called microread.
+         Say N if unsure.
+
+config NFC_MICROREAD_I2C
+       tristate "NFC Microread i2c support"
+       depends on NFC_MICROREAD && I2C && NFC_SHDLC
+       ---help---
+         This module adds support for the i2c interface of adapters using
+         Inside microread chipsets.  Select this if your platform is using
+         the i2c bus.
+
+         If you choose to build a module, it'll be called microread_i2c.
+         Say N if unsure.
+
+config NFC_MICROREAD_MEI
+       tristate "NFC Microread MEI support"
+       depends on NFC_MICROREAD && INTEL_MEI_BUS_NFC
+       ---help---
+         This module adds support for the mei interface of adapters using
+         Inside microread chipsets.  Select this if your microread chipset
+         is handled by Intel's Management Engine Interface on your platform.
+
+         If you choose to build a module, it'll be called microread_mei.
+         Say N if unsure.
diff --git a/drivers/nfc/microread/Makefile b/drivers/nfc/microread/Makefile
new file mode 100644 (file)
index 0000000..755c24c
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for Microread HCI based NFC driver
+#
+
+microread_i2c-objs  = i2c.o
+microread_mei-objs  = mei.o
+
+obj-$(CONFIG_NFC_MICROREAD)     += microread.o
+obj-$(CONFIG_NFC_MICROREAD_I2C) += microread_i2c.o
+obj-$(CONFIG_NFC_MICROREAD_MEI) += microread_mei.o
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
new file mode 100644 (file)
index 0000000..1010894
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+ * HCI based Driver for Inside Secure microread NFC Chip - i2c layer
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "microread.h"
+
+#define MICROREAD_I2C_DRIVER_NAME "microread"
+
+#define MICROREAD_I2C_FRAME_HEADROOM 1
+#define MICROREAD_I2C_FRAME_TAILROOM 1
+
+/* framing in HCI mode */
+#define MICROREAD_I2C_LLC_LEN          1
+#define MICROREAD_I2C_LLC_CRC          1
+#define MICROREAD_I2C_LLC_LEN_CRC      (MICROREAD_I2C_LLC_LEN + \
+                                       MICROREAD_I2C_LLC_CRC)
+#define MICROREAD_I2C_LLC_MIN_SIZE     (1 + MICROREAD_I2C_LLC_LEN_CRC)
+#define MICROREAD_I2C_LLC_MAX_PAYLOAD  29
+#define MICROREAD_I2C_LLC_MAX_SIZE     (MICROREAD_I2C_LLC_LEN_CRC + 1 + \
+                                       MICROREAD_I2C_LLC_MAX_PAYLOAD)
+
+struct microread_i2c_phy {
+       struct i2c_client *i2c_dev;
+       struct nfc_hci_dev *hdev;
+
+       int irq;
+
+       int hard_fault;         /*
+                                * < 0 if hardware error occured (e.g. i2c err)
+                                * and prevents normal operation.
+                                */
+};
+
+#define I2C_DUMP_SKB(info, skb)                                        \
+do {                                                           \
+       pr_debug("%s:\n", info);                                \
+       print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
+                      16, 1, (skb)->data, (skb)->len, 0);      \
+} while (0)
+
+static void microread_i2c_add_len_crc(struct sk_buff *skb)
+{
+       int i;
+       u8 crc = 0;
+       int len;
+
+       len = skb->len;
+       *skb_push(skb, 1) = len;
+
+       for (i = 0; i < skb->len; i++)
+               crc = crc ^ skb->data[i];
+
+       *skb_put(skb, 1) = crc;
+}
+
+static void microread_i2c_remove_len_crc(struct sk_buff *skb)
+{
+       skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM);
+       skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM);
+}
+
+static int check_crc(struct sk_buff *skb)
+{
+       int i;
+       u8 crc = 0;
+
+       for (i = 0; i < skb->len - 1; i++)
+               crc = crc ^ skb->data[i];
+
+       if (crc != skb->data[skb->len-1]) {
+               pr_err(MICROREAD_I2C_DRIVER_NAME
+                      ": CRC error 0x%x != 0x%x\n",
+                      crc, skb->data[skb->len-1]);
+
+               pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+
+               return -EPERM;
+       }
+
+       return 0;
+}
+
+static int microread_i2c_enable(void *phy_id)
+{
+       return 0;
+}
+
+static void microread_i2c_disable(void *phy_id)
+{
+       return;
+}
+
+static int microread_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+       int r;
+       struct microread_i2c_phy *phy = phy_id;
+       struct i2c_client *client = phy->i2c_dev;
+
+       if (phy->hard_fault != 0)
+               return phy->hard_fault;
+
+       usleep_range(3000, 6000);
+
+       microread_i2c_add_len_crc(skb);
+
+       I2C_DUMP_SKB("i2c frame written", skb);
+
+       r = i2c_master_send(client, skb->data, skb->len);
+
+       if (r == -EREMOTEIO) {  /* Retry, chip was in standby */
+               usleep_range(6000, 10000);
+               r = i2c_master_send(client, skb->data, skb->len);
+       }
+
+       if (r >= 0) {
+               if (r != skb->len)
+                       r = -EREMOTEIO;
+               else
+                       r = 0;
+       }
+
+       microread_i2c_remove_len_crc(skb);
+
+       return r;
+}
+
+
+static int microread_i2c_read(struct microread_i2c_phy *phy,
+                             struct sk_buff **skb)
+{
+       int r;
+       u8 len;
+       u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
+       struct i2c_client *client = phy->i2c_dev;
+
+       pr_debug("%s\n", __func__);
+
+       r = i2c_master_recv(client, &len, 1);
+       if (r != 1) {
+               dev_err(&client->dev, "cannot read len byte\n");
+               return -EREMOTEIO;
+       }
+
+       if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
+           (len > MICROREAD_I2C_LLC_MAX_SIZE)) {
+               dev_err(&client->dev, "invalid len byte\n");
+               pr_err("invalid len byte\n");
+               r = -EBADMSG;
+               goto flush;
+       }
+
+       *skb = alloc_skb(1 + len, GFP_KERNEL);
+       if (*skb == NULL) {
+               r = -ENOMEM;
+               goto flush;
+       }
+
+       *skb_put(*skb, 1) = len;
+
+       r = i2c_master_recv(client, skb_put(*skb, len), len);
+       if (r != len) {
+               kfree_skb(*skb);
+               return -EREMOTEIO;
+       }
+
+       I2C_DUMP_SKB("cc frame read", *skb);
+
+       r = check_crc(*skb);
+       if (r != 0) {
+               kfree_skb(*skb);
+               r = -EBADMSG;
+               goto flush;
+       }
+
+       skb_pull(*skb, 1);
+       skb_trim(*skb, (*skb)->len - MICROREAD_I2C_FRAME_TAILROOM);
+
+       usleep_range(3000, 6000);
+
+       return 0;
+
+flush:
+       if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+               r = -EREMOTEIO;
+
+       usleep_range(3000, 6000);
+
+       return r;
+}
+
+static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+       struct microread_i2c_phy *phy = phy_id;
+       struct i2c_client *client;
+       struct sk_buff *skb = NULL;
+       int r;
+
+       if (!phy || irq != phy->i2c_dev->irq) {
+               WARN_ON_ONCE(1);
+               return IRQ_NONE;
+       }
+
+       client = phy->i2c_dev;
+       dev_dbg(&client->dev, "IRQ\n");
+
+       if (phy->hard_fault != 0)
+               return IRQ_HANDLED;
+
+       r = microread_i2c_read(phy, &skb);
+       if (r == -EREMOTEIO) {
+               phy->hard_fault = r;
+
+               nfc_hci_recv_frame(phy->hdev, NULL);
+
+               return IRQ_HANDLED;
+       } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+               return IRQ_HANDLED;
+       }
+
+       nfc_hci_recv_frame(phy->hdev, skb);
+
+       return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+       .write = microread_i2c_write,
+       .enable = microread_i2c_enable,
+       .disable = microread_i2c_disable,
+};
+
+static int microread_i2c_probe(struct i2c_client *client,
+                              const struct i2c_device_id *id)
+{
+       struct microread_i2c_phy *phy;
+       struct microread_nfc_platform_data *pdata =
+               dev_get_platdata(&client->dev);
+       int r;
+
+       dev_dbg(&client->dev, "client %p", client);
+
+       if (!pdata) {
+               dev_err(&client->dev, "client %p: missing platform data",
+                       client);
+               return -EINVAL;
+       }
+
+       phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
+                          GFP_KERNEL);
+       if (!phy) {
+               dev_err(&client->dev, "Can't allocate microread phy");
+               return -ENOMEM;
+       }
+
+       i2c_set_clientdata(client, phy);
+       phy->i2c_dev = client;
+
+       r = request_threaded_irq(client->irq, NULL, microread_i2c_irq_thread_fn,
+                                IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                MICROREAD_I2C_DRIVER_NAME, phy);
+       if (r) {
+               dev_err(&client->dev, "Unable to register IRQ handler");
+               return r;
+       }
+
+       r = microread_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+                           MICROREAD_I2C_FRAME_HEADROOM,
+                           MICROREAD_I2C_FRAME_TAILROOM,
+                           MICROREAD_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+       if (r < 0)
+               goto err_irq;
+
+       dev_info(&client->dev, "Probed");
+
+       return 0;
+
+err_irq:
+       free_irq(client->irq, phy);
+
+       return r;
+}
+
+static int microread_i2c_remove(struct i2c_client *client)
+{
+       struct microread_i2c_phy *phy = i2c_get_clientdata(client);
+
+       dev_dbg(&client->dev, "%s\n", __func__);
+
+       microread_remove(phy->hdev);
+
+       free_irq(client->irq, phy);
+
+       return 0;
+}
+
+static struct i2c_device_id microread_i2c_id[] = {
+       { MICROREAD_I2C_DRIVER_NAME, 0},
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, microread_i2c_id);
+
+static struct i2c_driver microread_i2c_driver = {
+       .driver = {
+               .name = MICROREAD_I2C_DRIVER_NAME,
+       },
+       .probe          = microread_i2c_probe,
+       .remove         = microread_i2c_remove,
+       .id_table       = microread_i2c_id,
+};
+
+module_i2c_driver(microread_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
new file mode 100644 (file)
index 0000000..eef38cf
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * HCI based Driver for Inside Secure microread NFC Chip
+ *
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/mei_bus.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "microread.h"
+
+#define MICROREAD_DRIVER_NAME "microread"
+
+#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \
+                              0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
+struct mei_nfc_hdr {
+       u8 cmd;
+       u8 status;
+       u16 req_id;
+       u32 reserved;
+       u16 data_size;
+} __attribute__((packed));
+
+#define MEI_NFC_HEADER_SIZE 10
+#define MEI_NFC_MAX_HCI_PAYLOAD 300
+#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
+
+/* Physical-layer state for a microread chip reached over the MEI bus */
+struct microread_mei_phy {
+       /*
+        * MEI bus handle.  Named "device" because the rest of this file
+        * accesses it as phy->device (mei_send(), probe assignment).
+        */
+       struct mei_device *device;
+       struct nfc_hci_dev *hdev;       /* HCI device created by microread_probe() */
+
+       int powered;                    /* 1 after enable(), 0 after disable() */
+
+       int hard_fault;         /*
+                                * < 0 if hardware error occurred (e.g. i2c err)
+                                * and prevents normal operation.
+                                */
+};
+
+#define MEI_DUMP_SKB_IN(info, skb)                                     \
+do {                                                           \
+       pr_debug("%s:\n", info);                                \
+       print_hex_dump(KERN_DEBUG, "mei in : ", DUMP_PREFIX_OFFSET,     \
+                      16, 1, (skb)->data, (skb)->len, 0);      \
+} while (0)
+
+#define MEI_DUMP_SKB_OUT(info, skb)                                    \
+do {                                                           \
+       pr_debug("%s:\n", info);                                \
+       print_hex_dump(KERN_DEBUG, "mei out: ", DUMP_PREFIX_OFFSET,     \
+                      16, 1, (skb)->data, (skb)->len, 0);      \
+} while (0)
+
+/* nfc_phy_ops.enable: mark the phy powered; the MEI link needs no extra setup */
+static int microread_mei_enable(void *phy_id)
+{
+       struct microread_mei_phy *phy = phy_id;
+
+       pr_info(DRIVER_DESC ": %s\n", __func__);
+
+       phy->powered = 1;
+
+       return 0;
+}
+
+/* nfc_phy_ops.disable: mark the phy unpowered (no hardware action here) */
+static void microread_mei_disable(void *phy_id)
+{
+       struct microread_mei_phy *phy = phy_id;
+
+       pr_info(DRIVER_DESC ": %s\n", __func__);
+
+       phy->powered = 0;
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb
+ */
+static int microread_mei_write(void *phy_id, struct sk_buff *skb)
+{
+       struct microread_mei_phy *phy = phy_id;
+       int r;
+
+       MEI_DUMP_SKB_OUT("mei frame sent", skb);
+
+       /* mei_send() returns a positive byte count on success; fold it to 0 */
+       r = mei_send(phy->device, skb->data, skb->len);
+       if (r > 0)
+               r = 0;
+
+       return r;
+}
+
+/*
+ * MEI event callback: on RX, read one frame from the device and hand the
+ * HCI payload (MEI header stripped) up to the HCI core.
+ */
+static void microread_event_cb(struct mei_device *device, u32 events,
+                              void *context)
+{
+       struct microread_mei_phy *phy = context;
+
+       /* After a hardware fault, stop feeding frames upstream */
+       if (phy->hard_fault != 0)
+               return;
+
+       if (events & BIT(MEI_EVENT_RX)) {
+               struct sk_buff *skb;
+               int reply_size;
+
+               skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
+               if (!skb)
+                       return;
+
+               reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ);
+               if (reply_size < MEI_NFC_HEADER_SIZE) {
+                       /* An sk_buff must be freed with kfree_skb(), not kfree() */
+                       kfree_skb(skb);
+                       return;
+               }
+
+               skb_put(skb, reply_size);
+               skb_pull(skb, MEI_NFC_HEADER_SIZE);
+
+               MEI_DUMP_SKB_IN("mei frame read", skb);
+
+               /* HCI core takes ownership of the skb */
+               nfc_hci_recv_frame(phy->hdev, skb);
+       }
+}
+
+static struct nfc_phy_ops mei_phy_ops = {
+       .write = microread_mei_write,
+       .enable = microread_mei_enable,
+       .disable = microread_mei_disable,
+};
+
+/* MEI bus probe: allocate the phy, hook RX events and register the HCI dev */
+static int microread_mei_probe(struct mei_device *device,
+                              const struct mei_id *id)
+{
+       struct microread_mei_phy *phy;
+       int r;
+
+       pr_info("Probing NFC microread\n");
+
+       phy = kzalloc(sizeof(struct microread_mei_phy), GFP_KERNEL);
+       if (!phy) {
+               pr_err("Cannot allocate memory for microread mei phy.\n");
+               return -ENOMEM;
+       }
+
+       phy->device = device;
+       mei_set_clientdata(device, phy);
+
+       r = mei_register_event_cb(device, microread_event_cb, phy);
+       if (r) {
+               pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
+               goto err_out;
+       }
+
+       /*
+        * NOTE(review): if microread_probe() fails, the event callback stays
+        * registered while phy is freed — confirm the MEI bus unregisters it
+        * on device teardown, or add an explicit unregister on this path.
+        */
+       r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
+                           MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
+                           &phy->hdev);
+       if (r < 0)
+               goto err_out;
+
+       return 0;
+
+err_out:
+       kfree(phy);
+
+       return r;
+}
+
+/* MEI bus remove: unregister the HCI device and release the phy */
+static int microread_mei_remove(struct mei_device *device)
+{
+       struct microread_mei_phy *phy = mei_get_clientdata(device);
+
+       pr_info("Removing microread\n");
+
+       microread_remove(phy->hdev);
+
+       /* Power down only if still enabled (hdev close normally does this) */
+       if (phy->powered)
+               microread_mei_disable(phy);
+
+       kfree(phy);
+
+       return 0;
+}
+
+static struct mei_id microread_mei_tbl[] = {
+       { MICROREAD_DRIVER_NAME, MICROREAD_UUID },
+
+       /* required last entry */
+       { }
+};
+
+MODULE_DEVICE_TABLE(mei, microread_mei_tbl);
+
+static struct mei_driver microread_driver = {
+       .id_table = microread_mei_tbl,
+       .name = MICROREAD_DRIVER_NAME,
+
+       .probe = microread_mei_probe,
+       .remove = microread_mei_remove,
+};
+
+/* Module init: register this driver with the MEI bus */
+static int microread_mei_init(void)
+{
+       int r;
+
+       pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+       r = mei_driver_register(&microread_driver);
+       if (r) {
+               pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
+               return r;
+       }
+
+       return 0;
+}
+
+/* Module exit: unregister from the MEI bus */
+static void microread_mei_exit(void)
+{
+       mei_driver_unregister(&microread_driver);
+}
+
+module_init(microread_mei_init);
+module_exit(microread_mei_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
new file mode 100644 (file)
index 0000000..3420d83
--- /dev/null
@@ -0,0 +1,728 @@
+/*
+ * HCI based Driver for Inside Secure microread NFC Chip
+ *
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/crc-ccitt.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "microread.h"
+
+/* Proprietary gates, events, commands and registers */
+/* Admin */
+#define MICROREAD_GATE_ID_ADM NFC_HCI_ADMIN_GATE
+#define MICROREAD_GATE_ID_MGT 0x01
+#define MICROREAD_GATE_ID_OS 0x02
+#define MICROREAD_GATE_ID_TESTRF 0x03
+#define MICROREAD_GATE_ID_LOOPBACK NFC_HCI_LOOPBACK_GATE
+#define MICROREAD_GATE_ID_IDT NFC_HCI_ID_MGMT_GATE
+#define MICROREAD_GATE_ID_LMS NFC_HCI_LINK_MGMT_GATE
+
+/* Reader */
+#define MICROREAD_GATE_ID_MREAD_GEN 0x10
+#define MICROREAD_GATE_ID_MREAD_ISO_B NFC_HCI_RF_READER_B_GATE
+#define MICROREAD_GATE_ID_MREAD_NFC_T1 0x12
+#define MICROREAD_GATE_ID_MREAD_ISO_A NFC_HCI_RF_READER_A_GATE
+#define MICROREAD_GATE_ID_MREAD_NFC_T3 0x14
+#define MICROREAD_GATE_ID_MREAD_ISO_15_3 0x15
+#define MICROREAD_GATE_ID_MREAD_ISO_15_2 0x16
+#define MICROREAD_GATE_ID_MREAD_ISO_B_3 0x17
+#define MICROREAD_GATE_ID_MREAD_BPRIME 0x18
+#define MICROREAD_GATE_ID_MREAD_ISO_A_3 0x19
+
+/* Card */
+#define MICROREAD_GATE_ID_MCARD_GEN 0x20
+#define MICROREAD_GATE_ID_MCARD_ISO_B 0x21
+#define MICROREAD_GATE_ID_MCARD_BPRIME 0x22
+#define MICROREAD_GATE_ID_MCARD_ISO_A 0x23
+#define MICROREAD_GATE_ID_MCARD_NFC_T3 0x24
+#define MICROREAD_GATE_ID_MCARD_ISO_15_3 0x25
+#define MICROREAD_GATE_ID_MCARD_ISO_15_2 0x26
+#define MICROREAD_GATE_ID_MCARD_ISO_B_2 0x27
+#define MICROREAD_GATE_ID_MCARD_ISO_CUSTOM 0x28
+#define MICROREAD_GATE_ID_SECURE_ELEMENT 0x2F
+
+/* P2P */
+#define MICROREAD_GATE_ID_P2P_GEN 0x30
+#define MICROREAD_GATE_ID_P2P_TARGET 0x31
+#define MICROREAD_PAR_P2P_TARGET_MODE 0x01
+#define MICROREAD_PAR_P2P_TARGET_GT 0x04
+#define MICROREAD_GATE_ID_P2P_INITIATOR 0x32
+#define MICROREAD_PAR_P2P_INITIATOR_GI 0x01
+#define MICROREAD_PAR_P2P_INITIATOR_GT 0x03
+
+/* Those pipes are created/opened by default in the chip */
+#define MICROREAD_PIPE_ID_LMS 0x00
+#define MICROREAD_PIPE_ID_ADMIN 0x01
+#define MICROREAD_PIPE_ID_MGT 0x02
+#define MICROREAD_PIPE_ID_OS 0x03
+#define MICROREAD_PIPE_ID_HDS_LOOPBACK 0x04
+#define MICROREAD_PIPE_ID_HDS_IDT 0x05
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_B 0x08
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_BPRIME 0x09
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_A 0x0A
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_15_3 0x0B
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_15_2 0x0C
+#define MICROREAD_PIPE_ID_HDS_MCARD_NFC_T3 0x0D
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_B_2 0x0E
+#define MICROREAD_PIPE_ID_HDS_MCARD_CUSTOM 0x0F
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_B 0x10
+#define MICROREAD_PIPE_ID_HDS_MREAD_NFC_T1 0x11
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_A 0x12
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_15_3 0x13
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_15_2 0x14
+#define MICROREAD_PIPE_ID_HDS_MREAD_NFC_T3 0x15
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_B_3 0x16
+#define MICROREAD_PIPE_ID_HDS_MREAD_BPRIME 0x17
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_A_3 0x18
+#define MICROREAD_PIPE_ID_HDS_MREAD_GEN 0x1B
+#define MICROREAD_PIPE_ID_HDS_STACKED_ELEMENT 0x1C
+#define MICROREAD_PIPE_ID_HDS_INSTANCES 0x1D
+#define MICROREAD_PIPE_ID_HDS_TESTRF 0x1E
+#define MICROREAD_PIPE_ID_HDS_P2P_TARGET 0x1F
+#define MICROREAD_PIPE_ID_HDS_P2P_INITIATOR 0x20
+
+/* Events */
+#define MICROREAD_EVT_MREAD_DISCOVERY_OCCURED NFC_HCI_EVT_TARGET_DISCOVERED
+#define MICROREAD_EVT_MREAD_CARD_FOUND 0x3D
+#define MICROREAD_EMCF_A_ATQA 0
+#define MICROREAD_EMCF_A_SAK 2
+#define MICROREAD_EMCF_A_LEN 3
+#define MICROREAD_EMCF_A_UID 4
+#define MICROREAD_EMCF_A3_ATQA 0
+#define MICROREAD_EMCF_A3_SAK 2
+#define MICROREAD_EMCF_A3_LEN 3
+#define MICROREAD_EMCF_A3_UID 4
+#define MICROREAD_EMCF_B_UID 0
+#define MICROREAD_EMCF_T1_ATQA 0
+#define MICROREAD_EMCF_T1_UID 4
+#define MICROREAD_EMCF_T3_UID 0
+#define MICROREAD_EVT_MREAD_DISCOVERY_START NFC_HCI_EVT_READER_REQUESTED
+#define MICROREAD_EVT_MREAD_DISCOVERY_START_SOME 0x3E
+#define MICROREAD_EVT_MREAD_DISCOVERY_STOP NFC_HCI_EVT_END_OPERATION
+#define MICROREAD_EVT_MREAD_SIM_REQUESTS 0x3F
+#define MICROREAD_EVT_MCARD_EXCHANGE NFC_HCI_EVT_TARGET_DISCOVERED
+#define MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF 0x20
+#define MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_FROM_RF 0x21
+#define MICROREAD_EVT_MCARD_FIELD_ON 0x11
+#define MICROREAD_EVT_P2P_TARGET_ACTIVATED 0x13
+#define MICROREAD_EVT_P2P_TARGET_DEACTIVATED 0x12
+#define MICROREAD_EVT_MCARD_FIELD_OFF 0x14
+
+/* Commands */
+#define MICROREAD_CMD_MREAD_EXCHANGE 0x10
+#define MICROREAD_CMD_MREAD_SUBSCRIBE 0x3F
+
+/* Hosts IDs */
+#define MICROREAD_ELT_ID_HDS NFC_HCI_TERMINAL_HOST_ID
+#define MICROREAD_ELT_ID_SIM NFC_HCI_UICC_HOST_ID
+#define MICROREAD_ELT_ID_SE1 0x03
+#define MICROREAD_ELT_ID_SE2 0x04
+#define MICROREAD_ELT_ID_SE3 0x05
+
+static struct nfc_hci_gate microread_gates[] = {
+       {MICROREAD_GATE_ID_ADM, MICROREAD_PIPE_ID_ADMIN},
+       {MICROREAD_GATE_ID_LOOPBACK, MICROREAD_PIPE_ID_HDS_LOOPBACK},
+       {MICROREAD_GATE_ID_IDT, MICROREAD_PIPE_ID_HDS_IDT},
+       {MICROREAD_GATE_ID_LMS, MICROREAD_PIPE_ID_LMS},
+       {MICROREAD_GATE_ID_MREAD_ISO_B, MICROREAD_PIPE_ID_HDS_MREAD_ISO_B},
+       {MICROREAD_GATE_ID_MREAD_ISO_A, MICROREAD_PIPE_ID_HDS_MREAD_ISO_A},
+       {MICROREAD_GATE_ID_MREAD_ISO_A_3, MICROREAD_PIPE_ID_HDS_MREAD_ISO_A_3},
+       {MICROREAD_GATE_ID_MGT, MICROREAD_PIPE_ID_MGT},
+       {MICROREAD_GATE_ID_OS, MICROREAD_PIPE_ID_OS},
+       {MICROREAD_GATE_ID_MREAD_NFC_T1, MICROREAD_PIPE_ID_HDS_MREAD_NFC_T1},
+       {MICROREAD_GATE_ID_MREAD_NFC_T3, MICROREAD_PIPE_ID_HDS_MREAD_NFC_T3},
+       {MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PIPE_ID_HDS_P2P_TARGET},
+       {MICROREAD_GATE_ID_P2P_INITIATOR, MICROREAD_PIPE_ID_HDS_P2P_INITIATOR}
+};
+
+/* Largest headroom needed for outgoing custom commands */
+#define MICROREAD_CMDS_HEADROOM        2
+#define MICROREAD_CMD_TAILROOM 2
+
+struct microread_info {
+       struct nfc_phy_ops *phy_ops;
+       void *phy_id;
+
+       struct nfc_hci_dev *hdev;
+
+       int async_cb_type;
+       data_exchange_cb_t async_cb;
+       void *async_cb_context;
+};
+
+/* HCI open: power up the underlying phy (I2C or MEI) */
+static int microread_open(struct nfc_hci_dev *hdev)
+{
+       struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+       return info->phy_ops->enable(info->phy_id);
+}
+
+/* HCI close: power down the underlying phy */
+static void microread_close(struct nfc_hci_dev *hdev)
+{
+       struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+       info->phy_ops->disable(info->phy_id);
+}
+
+/*
+ * Called once the HCI session is established: subscribe each reader gate
+ * the chip should poll on.  The raw parameter bytes are chip-specific
+ * subscription settings — presumably mode/selector values per RF tech;
+ * TODO confirm against the microread datasheet.
+ */
+static int microread_hci_ready(struct nfc_hci_dev *hdev)
+{
+       int r;
+       u8 param[4];
+
+       /* ISO14443-A reader gate */
+       param[0] = 0x03;
+       r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_A,
+                            MICROREAD_CMD_MREAD_SUBSCRIBE, param, 1, NULL);
+       if (r)
+               return r;
+
+       /* ISO14443-A layer 3 reader gate, no parameters */
+       r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_A_3,
+                            MICROREAD_CMD_MREAD_SUBSCRIBE, NULL, 0, NULL);
+       if (r)
+               return r;
+
+       /* ISO14443-B reader gate */
+       param[0] = 0x00;
+       param[1] = 0x03;
+       param[2] = 0x00;
+       r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_B,
+                            MICROREAD_CMD_MREAD_SUBSCRIBE, param, 3, NULL);
+       if (r)
+               return r;
+
+       /* Type 1 (Jewel/Topaz) reader gate, no parameters */
+       r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_NFC_T1,
+                            MICROREAD_CMD_MREAD_SUBSCRIBE, NULL, 0, NULL);
+       if (r)
+               return r;
+
+       /* Type 3 (FeliCa) reader gate; 0xFFFF looks like a wildcard system
+        * code — TODO confirm */
+       param[0] = 0xFF;
+       param[1] = 0xFF;
+       param[2] = 0x00;
+       param[3] = 0x00;
+       r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_NFC_T3,
+                            MICROREAD_CMD_MREAD_SUBSCRIBE, param, 4, NULL);
+
+       return r;
+}
+
+/* HCI xmit: forward a fully-built HCI frame to the phy layer */
+static int microread_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+       return info->phy_ops->write(info->phy_id, skb);
+}
+
+/*
+ * Start RF discovery: map the requested initiator/target protocol masks
+ * onto the chip's two discovery parameter bytes, configure P2P general
+ * bytes if DEP is requested, then (re)start discovery.
+ */
+static int microread_start_poll(struct nfc_hci_dev *hdev,
+                               u32 im_protocols, u32 tm_protocols)
+{
+       int r;
+
+       u8 param[2];
+       u8 mode;
+
+       /* Each bit below selects one RF technology in the chip's
+        * DISCOVERY_START_SOME parameters */
+       param[0] = 0x00;
+       param[1] = 0x00;
+
+       if (im_protocols & NFC_PROTO_ISO14443_MASK)
+               param[0] |= (1 << 2);
+
+       if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+               param[0] |= 1;
+
+       if (im_protocols & NFC_PROTO_MIFARE_MASK)
+               param[1] |= 1;
+
+       if (im_protocols & NFC_PROTO_JEWEL_MASK)
+               param[0] |= (1 << 1);
+
+       if (im_protocols & NFC_PROTO_FELICA_MASK)
+               param[0] |= (1 << 5);
+
+       if (im_protocols & NFC_PROTO_NFC_DEP_MASK)
+               param[1] |= (1 << 1);
+
+       /* DEP needs local general bytes; without them, drop DEP from both
+        * initiator and target masks rather than fail */
+       if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
+               hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
+                                                      &hdev->gb_len);
+               if (hdev->gb == NULL || hdev->gb_len == 0) {
+                       im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+                       tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+               }
+       }
+
+       /* Stop any discovery already in progress before reconfiguring */
+       r = nfc_hci_send_event(hdev, MICROREAD_GATE_ID_MREAD_ISO_A,
+                              MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0);
+       if (r)
+               return r;
+
+       /* 0xff presumably disables target mode — TODO confirm */
+       mode = 0xff;
+       r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+                             MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1);
+       if (r)
+               return r;
+
+       if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+               r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_INITIATOR,
+                                     MICROREAD_PAR_P2P_INITIATOR_GI,
+                                     hdev->gb, hdev->gb_len);
+               if (r)
+                       return r;
+       }
+
+       if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+               r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+                                     MICROREAD_PAR_P2P_TARGET_GT,
+                                     hdev->gb, hdev->gb_len);
+               if (r)
+                       return r;
+
+               /* 0x02 presumably enables passive target mode — TODO confirm */
+               mode = 0x02;
+               r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+                                     MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1);
+               if (r)
+                       return r;
+       }
+
+       return nfc_hci_send_event(hdev, MICROREAD_GATE_ID_MREAD_ISO_A,
+                                 MICROREAD_EVT_MREAD_DISCOVERY_START_SOME,
+                                 param, 2);
+}
+
+/*
+ * Read the remote party's general bytes from the chip, pass them to the
+ * NFC core and signal that the DEP link is up (we are RF initiator).
+ */
+static int microread_dep_link_up(struct nfc_hci_dev *hdev,
+                               struct nfc_target *target, u8 comm_mode,
+                               u8 *gb, size_t gb_len)
+{
+       struct sk_buff *rgb_skb = NULL;
+       int r;
+
+       r = nfc_hci_get_param(hdev, target->hci_reader_gate,
+                             MICROREAD_PAR_P2P_INITIATOR_GT, &rgb_skb);
+       if (r < 0)
+               return r;
+
+       /* Reject empty or oversized remote general bytes */
+       if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) {
+               r = -EPROTO;
+               goto exit;
+       }
+
+       r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data,
+                                        rgb_skb->len);
+       if (r == 0)
+               r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode,
+                                      NFC_RF_INITIATOR);
+exit:
+       kfree_skb(rgb_skb);
+
+       return r;
+}
+
+/* Tear the DEP link down by stopping discovery on the P2P initiator gate */
+static int microread_dep_link_down(struct nfc_hci_dev *hdev)
+{
+       return nfc_hci_send_event(hdev, MICROREAD_GATE_ID_P2P_INITIATOR,
+                                 MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0);
+}
+
+/*
+ * Fill target->supported_protocols from the gate a target was seen on.
+ * Only the P2P initiator gate is handled here; reader gates are covered
+ * by microread_target_discovered().
+ */
+static int microread_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+                                     struct nfc_target *target)
+{
+       switch (gate) {
+       case MICROREAD_GATE_ID_P2P_INITIATOR:
+               target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+               break;
+       default:
+               return -EPROTO;
+       }
+
+       return 0;
+}
+
+/* No post-discovery work is needed for this chip */
+static int microread_complete_target_discovered(struct nfc_hci_dev *hdev,
+                                               u8 gate,
+                                               struct nfc_target *target)
+{
+       return 0;
+}
+
+#define MICROREAD_CB_TYPE_READER_ALL 1
+
+/*
+ * Completion callback for reader-mode exchanges started by
+ * microread_im_transceive().  The chip appends a one-byte RF status to the
+ * response; strip it (0 = success) before handing data to the upper layer.
+ */
+static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
+                                      int err)
+{
+       struct microread_info *info = context;
+
+       switch (info->async_cb_type) {
+       case MICROREAD_CB_TYPE_READER_ALL:
+               if (err == 0) {
+                       /* Response must carry at least the status byte */
+                       if (skb->len == 0) {
+                               err = -EPROTO;
+                               kfree_skb(skb);
+                               info->async_cb(info->async_cb_context, NULL,
+                                              -EPROTO);
+                               return;
+                       }
+
+                       /* Non-zero trailing byte is an HCI result code */
+                       if (skb->data[skb->len - 1] != 0) {
+                               err = nfc_hci_result_to_errno(
+                                                      skb->data[skb->len - 1]);
+                               kfree_skb(skb);
+                               info->async_cb(info->async_cb_context, NULL,
+                                              err);
+                               return;
+                       }
+
+                       skb_trim(skb, skb->len - 1);    /* RF Error ind. */
+               }
+               info->async_cb(info->async_cb_context, skb, err);
+               break;
+       default:
+               /* Unknown exchange type: drop the data silently */
+               if (err == 0)
+                       kfree_skb(skb);
+               break;
+       }
+}
+
+/*
+ * Returns:
+ * <= 0: driver handled the data exchange
+ *    1: driver doesn't especially handle, please do standard processing
+ */
+static int microread_im_transceive(struct nfc_hci_dev *hdev,
+                                  struct nfc_target *target,
+                                  struct sk_buff *skb, data_exchange_cb_t cb,
+                                  void *cb_context)
+{
+       struct microread_info *info = nfc_hci_get_clientdata(hdev);
+       u8 control_bits;
+       u16 crc;
+
+       pr_info("data exchange to gate 0x%x\n", target->hci_reader_gate);
+
+       /* P2P initiator exchanges go out as an event, prefixed with a zero
+        * byte, and complete via the FROM_RF event instead of this path */
+       if (target->hci_reader_gate == MICROREAD_GATE_ID_P2P_INITIATOR) {
+               *skb_push(skb, 1) = 0;
+
+               return nfc_hci_send_event(hdev, target->hci_reader_gate,
+                                    MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF,
+                                    skb->data, skb->len);
+       }
+
+       /* Per-technology control byte prepended to the frame; values are
+        * chip-specific — TODO confirm their meaning against the datasheet */
+       switch (target->hci_reader_gate) {
+       case MICROREAD_GATE_ID_MREAD_ISO_A:
+               control_bits = 0xCB;
+               break;
+       case MICROREAD_GATE_ID_MREAD_ISO_A_3:
+               control_bits = 0xCB;
+               break;
+       case MICROREAD_GATE_ID_MREAD_ISO_B:
+               control_bits = 0xCB;
+               break;
+       case MICROREAD_GATE_ID_MREAD_NFC_T1:
+               control_bits = 0x1B;
+
+               /* Type 1 tags need the inverted CRC-CCITT appended, LSB first */
+               crc = crc_ccitt(0xffff, skb->data, skb->len);
+               crc = ~crc;
+               *skb_put(skb, 1) = crc & 0xff;
+               *skb_put(skb, 1) = crc >> 8;
+               break;
+       case MICROREAD_GATE_ID_MREAD_NFC_T3:
+               control_bits = 0xDB;
+               break;
+       default:
+               pr_info("Abort im_transceive to invalid gate 0x%x\n",
+                       target->hci_reader_gate);
+               return 1;
+       }
+
+       *skb_push(skb, 1) = control_bits;
+
+       /* Completion is delivered through microread_im_transceive_cb() */
+       info->async_cb_type = MICROREAD_CB_TYPE_READER_ALL;
+       info->async_cb = cb;
+       info->async_cb_context = cb_context;
+
+       return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                     MICROREAD_CMD_MREAD_EXCHANGE,
+                                     skb->data, skb->len,
+                                     microread_im_transceive_cb, info);
+}
+
+/* Target-mode send: emit the payload as an MCARD_EXCHANGE event, then
+ * release the skb (this function owns it) */
+static int microread_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       int r;
+
+       r = nfc_hci_send_event(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+                              MICROREAD_EVT_MCARD_EXCHANGE,
+                              skb->data, skb->len);
+
+       kfree_skb(skb);
+
+       return r;
+}
+
+/*
+ * Decode a CARD_FOUND event payload into an nfc_target and report it to
+ * the NFC core.  The payload layout depends on the gate the event arrived
+ * on (MICROREAD_EMCF_* offsets).  Consumes skb.
+ */
+static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
+                                       struct sk_buff *skb)
+{
+       struct nfc_target *targets;
+       int r = 0;
+
+       pr_info("target discovered to gate 0x%x\n", gate);
+
+       targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+       if (targets == NULL) {
+               r = -ENOMEM;
+               goto exit;
+       }
+
+       targets->hci_reader_gate = gate;
+
+       /*
+        * NOTE(review): the payload offsets and the UID length byte are read
+        * without checking skb->len or bounding the length against
+        * sizeof(targets->nfcid1) — confirm the chip bounds these, or
+        * validate before the memcpy calls below.
+        */
+       switch (gate) {
+       case MICROREAD_GATE_ID_MREAD_ISO_A:
+               targets->supported_protocols =
+                     nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A_SAK]);
+               targets->sens_res =
+                        be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
+               targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+                      skb->data[MICROREAD_EMCF_A_LEN]);
+               targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
+               break;
+       case MICROREAD_GATE_ID_MREAD_ISO_A_3:
+               targets->supported_protocols =
+                     nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A3_SAK]);
+               targets->sens_res =
+                        be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
+               targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+                      skb->data[MICROREAD_EMCF_A3_LEN]);
+               targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
+               break;
+       case MICROREAD_GATE_ID_MREAD_ISO_B:
+               targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_B_UID], 4);
+               targets->nfcid1_len = 4;
+               break;
+       case MICROREAD_GATE_ID_MREAD_NFC_T1:
+               targets->supported_protocols = NFC_PROTO_JEWEL_MASK;
+               targets->sens_res =
+                       le16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_T1_ATQA]);
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T1_UID], 4);
+               targets->nfcid1_len = 4;
+               break;
+       case MICROREAD_GATE_ID_MREAD_NFC_T3:
+               targets->supported_protocols = NFC_PROTO_FELICA_MASK;
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T3_UID], 8);
+               targets->nfcid1_len = 8;
+               break;
+       default:
+               pr_info("discard target discovered to gate 0x%x\n", gate);
+               goto exit_free;
+       }
+
+       r = nfc_targets_found(hdev->ndev, targets, 1);
+
+exit_free:
+       kfree(targets);
+
+exit:
+       kfree_skb(skb);
+
+       if (r)
+               pr_err("Failed to handle discovered target err=%d", r);
+}
+
+/*
+ * HCI event dispatcher.  Ownership rule: every handled case either passes
+ * skb to a consumer or frees it here; returning 1 tells the HCI core the
+ * event was not handled (core keeps skb ownership in that case).
+ */
+static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
+                                    u8 event, struct sk_buff *skb)
+{
+       int r;
+       u8 mode;
+
+       pr_info("Microread received event 0x%x to gate 0x%x\n", event, gate);
+
+       switch (event) {
+       case MICROREAD_EVT_MREAD_CARD_FOUND:
+               /* Consumes skb */
+               microread_target_discovered(hdev, gate, skb);
+               return 0;
+
+       case MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_FROM_RF:
+               /* Last byte is an RF status; non-zero means exchange failed */
+               if (skb->len < 1) {
+                       kfree_skb(skb);
+                       return -EPROTO;
+               }
+
+               if (skb->data[skb->len - 1]) {
+                       kfree_skb(skb);
+                       return -EIO;
+               }
+
+               skb_trim(skb, skb->len - 1);
+
+               r = nfc_tm_data_received(hdev->ndev, skb);
+               break;
+
+       case MICROREAD_EVT_MCARD_FIELD_ON:
+       case MICROREAD_EVT_MCARD_FIELD_OFF:
+               /* Field transitions carry no payload we act on */
+               kfree_skb(skb);
+               return 0;
+
+       case MICROREAD_EVT_P2P_TARGET_ACTIVATED:
+               r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
+                                    NFC_COMM_PASSIVE, skb->data,
+                                    skb->len);
+
+               kfree_skb(skb);
+               break;
+
+       case MICROREAD_EVT_MCARD_EXCHANGE:
+               /* Same trailing-status framing as the FROM_RF case above */
+               if (skb->len < 1) {
+                       kfree_skb(skb);
+                       return -EPROTO;
+               }
+
+               if (skb->data[skb->len-1]) {
+                       kfree_skb(skb);
+                       return -EIO;
+               }
+
+               skb_trim(skb, skb->len - 1);
+
+               r = nfc_tm_data_received(hdev->ndev, skb);
+               break;
+
+       case MICROREAD_EVT_P2P_TARGET_DEACTIVATED:
+               kfree_skb(skb);
+
+               /* Re-arm target mode, then stop discovery on this gate */
+               mode = 0xff;
+               r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+                                     MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1);
+               if (r)
+                       break;
+
+               r = nfc_hci_send_event(hdev, gate,
+                                      MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL,
+                                      0);
+               break;
+
+       default:
+               return 1;
+       }
+
+       return r;
+}
+
+static struct nfc_hci_ops microread_hci_ops = {
+       .open = microread_open,
+       .close = microread_close,
+       .hci_ready = microread_hci_ready,
+       .xmit = microread_xmit,
+       .start_poll = microread_start_poll,
+       .dep_link_up = microread_dep_link_up,
+       .dep_link_down = microread_dep_link_down,
+       .target_from_gate = microread_target_from_gate,
+       .complete_target_discovered = microread_complete_target_discovered,
+       .im_transceive = microread_im_transceive,
+       .tm_send = microread_tm_send,
+       .check_presence = NULL,
+       .event_received = microread_event_received,
+};
+
+/*
+ * Common probe shared by the I2C and MEI phy drivers: allocate the driver
+ * state, create and register the HCI device.  On success *hdev is set and
+ * the caller later releases it via microread_remove().
+ */
+int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+                   int phy_headroom, int phy_tailroom, int phy_payload,
+                   struct nfc_hci_dev **hdev)
+{
+       struct microread_info *info;
+       unsigned long quirks = 0;
+       u32 protocols, se;
+       struct nfc_hci_init_data init_data;
+       int r;
+
+       info = kzalloc(sizeof(struct microread_info), GFP_KERNEL);
+       if (!info) {
+               pr_err("Cannot allocate memory for microread_info.\n");
+               r = -ENOMEM;
+               goto err_info_alloc;
+       }
+
+       info->phy_ops = phy_ops;
+       info->phy_id = phy_id;
+
+       init_data.gate_count = ARRAY_SIZE(microread_gates);
+       memcpy(init_data.gates, microread_gates, sizeof(microread_gates));
+
+       /* 8-character session id identifying this chip's pipe config */
+       strcpy(init_data.session_id, "MICROREA");
+
+       set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks);
+
+       protocols = NFC_PROTO_JEWEL_MASK |
+                   NFC_PROTO_MIFARE_MASK |
+                   NFC_PROTO_FELICA_MASK |
+                   NFC_PROTO_ISO14443_MASK |
+                   NFC_PROTO_ISO14443_B_MASK |
+                   NFC_PROTO_NFC_DEP_MASK;
+
+       se = NFC_SE_UICC | NFC_SE_EMBEDDED;
+
+       /* Reserve room for our control byte / CRC on top of the phy's needs */
+       info->hdev = nfc_hci_allocate_device(&microread_hci_ops, &init_data,
+                                            quirks, protocols, se, llc_name,
+                                            phy_headroom +
+                                            MICROREAD_CMDS_HEADROOM,
+                                            phy_tailroom +
+                                            MICROREAD_CMD_TAILROOM,
+                                            phy_payload);
+       if (!info->hdev) {
+               pr_err("Cannot allocate nfc hdev.\n");
+               r = -ENOMEM;
+               goto err_alloc_hdev;
+       }
+
+       nfc_hci_set_clientdata(info->hdev, info);
+
+       r = nfc_hci_register_device(info->hdev);
+       if (r)
+               goto err_regdev;
+
+       *hdev = info->hdev;
+
+       return 0;
+
+err_regdev:
+       nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
+       kfree(info);
+
+err_info_alloc:
+       return r;
+}
+EXPORT_SYMBOL(microread_probe);
+
+/* Counterpart to microread_probe(): unregister and free the HCI device
+ * and the driver state allocated at probe time */
+void microread_remove(struct nfc_hci_dev *hdev)
+{
+       struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+       nfc_hci_unregister_device(hdev);
+       nfc_hci_free_device(hdev);
+       kfree(info);
+}
+EXPORT_SYMBOL(microread_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/microread/microread.h b/drivers/nfc/microread/microread.h
new file mode 100644 (file)
index 0000000..64b447a
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 - 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_MICROREAD_H_
+#define __LOCAL_MICROREAD_H_
+
+#include <net/nfc/hci.h>
+
+#define DRIVER_DESC "NFC driver for microread"
+
+int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+                   int phy_headroom, int phy_tailroom, int phy_payload,
+                   struct nfc_hci_dev **hdev);
+
+void microread_remove(struct nfc_hci_dev *hdev);
+
+#endif /* __LOCAL_MICROREAD_H_ */
index f696318..f0f6763 100644 (file)
@@ -219,7 +219,7 @@ struct pn533_poll_modulations {
        u8 len;
 };
 
-const struct pn533_poll_modulations poll_mod[] = {
+static const struct pn533_poll_modulations poll_mod[] = {
        [PN533_POLL_MOD_106KBPS_A] = {
                .data = {
                        .maxtg = 1,
@@ -485,7 +485,7 @@ static u8 pn533_get_cmd_code(void *frame)
        return PN533_FRAME_CMD(f);
 }
 
-struct pn533_frame_ops pn533_std_frame_ops = {
+static struct pn533_frame_ops pn533_std_frame_ops = {
        .tx_frame_init = pn533_tx_frame_init,
        .tx_frame_finish = pn533_tx_frame_finish,
        .tx_update_payload_len = pn533_tx_update_payload_len,
@@ -532,7 +532,6 @@ static void pn533_recv_response(struct urb *urb)
                            urb->status);
                dev->wq_in_error = urb->status;
                goto sched_wq;
-               break;
        case -ESHUTDOWN:
        default:
                nfc_dev_err(&dev->interface->dev,
@@ -589,7 +588,6 @@ static void pn533_recv_ack(struct urb *urb)
                            urb->status);
                dev->wq_in_error = urb->status;
                goto sched_wq;
-               break;
        case -ESHUTDOWN:
        default:
                nfc_dev_err(&dev->interface->dev,
@@ -1380,7 +1378,7 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
                return NULL;
 
        /* DEP support only */
-       *skb_put(skb, 1) |= PN533_INIT_TARGET_DEP;
+       *skb_put(skb, 1) = PN533_INIT_TARGET_DEP;
 
        /* MIFARE params */
        memcpy(skb_put(skb, 6), mifare_params, 6);
index c31aeb0..a5f3c8c 100644 (file)
@@ -181,12 +181,11 @@ config PINCTRL_COH901
 
 config PINCTRL_SAMSUNG
        bool
-       depends on OF && GPIOLIB
        select PINMUX
        select PINCONF
 
-config PINCTRL_EXYNOS4
-       bool "Pinctrl driver data for Exynos4 SoC"
+config PINCTRL_EXYNOS
+       bool "Pinctrl driver data for Samsung EXYNOS SoCs"
        depends on OF && GPIOLIB
        select PINCTRL_SAMSUNG
 
index fc4606f..6e87e52 100644 (file)
@@ -36,7 +36,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
 obj-$(CONFIG_PINCTRL_U300)     += pinctrl-u300.o
 obj-$(CONFIG_PINCTRL_COH901)   += pinctrl-coh901.o
 obj-$(CONFIG_PINCTRL_SAMSUNG)  += pinctrl-samsung.o
-obj-$(CONFIG_PINCTRL_EXYNOS4)  += pinctrl-exynos.o
+obj-$(CONFIG_PINCTRL_EXYNOS)   += pinctrl-exynos.o
 obj-$(CONFIG_PINCTRL_EXYNOS5440)       += pinctrl-exynos5440.o
 obj-$(CONFIG_PINCTRL_XWAY)     += pinctrl-xway.o
 obj-$(CONFIG_PINCTRL_LANTIQ)   += pinctrl-lantiq.o
index 69aba36..428ea96 100644 (file)
@@ -588,7 +588,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match =
                of_match_device(dove_pinctrl_of_match, &pdev->dev);
-       pdev->dev.platform_data = match->data;
+       pdev->dev.platform_data = (void *)match->data;
 
        /*
         * General MPP Configuration Register is part of pdma registers.
index f12084e..cdd483d 100644 (file)
@@ -66,9 +66,9 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
                MPP_VAR_FUNCTION(0x5, "sata0", "act",    V(0, 1, 1, 1, 1, 0)),
                MPP_VAR_FUNCTION(0xb, "lcd", "vsync",    V(0, 0, 0, 0, 1, 0))),
        MPP_MODE(6,
-               MPP_VAR_FUNCTION(0x0, "sysrst", "out",   V(1, 1, 1, 1, 1, 1)),
-               MPP_VAR_FUNCTION(0x1, "spi", "mosi",     V(1, 1, 1, 1, 1, 1)),
-               MPP_VAR_FUNCTION(0x2, "ptp", "trig",     V(1, 1, 1, 1, 0, 0))),
+               MPP_VAR_FUNCTION(0x1, "sysrst", "out",   V(1, 1, 1, 1, 1, 1)),
+               MPP_VAR_FUNCTION(0x2, "spi", "mosi",     V(1, 1, 1, 1, 1, 1)),
+               MPP_VAR_FUNCTION(0x3, "ptp", "trig",     V(1, 1, 1, 1, 0, 0))),
        MPP_MODE(7,
                MPP_VAR_FUNCTION(0x0, "gpo", NULL,       V(1, 1, 1, 1, 1, 1)),
                MPP_VAR_FUNCTION(0x1, "pex", "rsto",     V(1, 1, 1, 1, 0, 1)),
@@ -458,7 +458,7 @@ static int kirkwood_pinctrl_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match =
                of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
-       pdev->dev.platform_data = match->data;
+       pdev->dev.platform_data = (void *)match->data;
        return mvebu_pinctrl_probe(pdev);
 }
 
index de05b64..1427299 100644 (file)
@@ -599,7 +599,7 @@ static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offse
 }
 
 /* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */
-static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
+static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
                        struct device_node *cfg_np, unsigned int **pin_list,
                        unsigned int *npins)
 {
@@ -630,7 +630,7 @@ static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
  * Parse the information about all the available pin groups and pin functions
  * from device node of the pin-controller.
  */
-static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
+static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        struct device *dev = &pdev->dev;
@@ -723,7 +723,7 @@ static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
 }
 
 /* register the pinctrl interface with the pinctrl subsystem */
-static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
+static int exynos5440_pinctrl_register(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        struct device *dev = &pdev->dev;
@@ -798,7 +798,7 @@ static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
 }
 
 /* register the gpiolib interface with the gpiolib subsystem */
-static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
+static int exynos5440_gpiolib_register(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        struct gpio_chip *gc;
@@ -831,7 +831,7 @@ static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
 }
 
 /* unregister the gpiolib interface with the gpiolib subsystem */
-static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev,
+static int exynos5440_gpiolib_unregister(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        int ret = gpiochip_remove(priv->gc);
index dd227d2..23af9f1 100644 (file)
@@ -146,7 +146,7 @@ free:
 static void mxs_dt_free_map(struct pinctrl_dev *pctldev,
                            struct pinctrl_map *map, unsigned num_maps)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < num_maps; i++) {
                if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
@@ -203,7 +203,7 @@ static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,
        void __iomem *reg;
        u8 bank, shift;
        u16 pin;
-       int i;
+       u32 i;
 
        for (i = 0; i < g->npins; i++) {
                bank = PINID_TO_BANK(g->pins[i]);
@@ -256,7 +256,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
        void __iomem *reg;
        u8 ma, vol, pull, bank, shift;
        u16 pin;
-       int i;
+       u32 i;
 
        ma = CONFIG_TO_MA(config);
        vol = CONFIG_TO_VOL(config);
@@ -345,8 +345,7 @@ static int mxs_pinctrl_parse_group(struct platform_device *pdev,
        const char *propname = "fsl,pinmux-ids";
        char *group;
        int length = strlen(np->name) + SUFFIX_LEN;
-       int i;
-       u32 val;
+       u32 val, i;
 
        group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
        if (!group)
index 1bb16ff..5767b18 100644 (file)
@@ -676,7 +676,7 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode)
 }
 EXPORT_SYMBOL(nmk_gpio_set_mode);
 
-static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
+static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
 {
        int i;
        u16 reg;
index f6a360b..5c32e88 100644 (file)
@@ -30,7 +30,6 @@
 #define PCS_MUX_BITS_NAME              "pinctrl-single,bits"
 #define PCS_REG_NAME_LEN               ((sizeof(unsigned long) * 2) + 1)
 #define PCS_OFF_DISABLED               ~0U
-#define PCS_MAX_GPIO_VALUES            2
 
 /**
  * struct pcs_pingroup - pingroups for a function
@@ -78,16 +77,6 @@ struct pcs_function {
 };
 
 /**
- * struct pcs_gpio_range - pinctrl gpio range
- * @range:     subrange of the GPIO number space
- * @gpio_func: gpio function value in the pinmux register
- */
-struct pcs_gpio_range {
-       struct pinctrl_gpio_range range;
-       int gpio_func;
-};
-
-/**
  * struct pcs_data - wrapper for data needed by pinctrl framework
  * @pa:                pindesc array
  * @cur:       index to current element
@@ -414,26 +403,9 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
 }
 
 static int pcs_request_gpio(struct pinctrl_dev *pctldev,
-                           struct pinctrl_gpio_range *range, unsigned pin)
+                       struct pinctrl_gpio_range *range, unsigned offset)
 {
-       struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
-       struct pcs_gpio_range *gpio = NULL;
-       int end, mux_bytes;
-       unsigned data;
-
-       gpio = container_of(range, struct pcs_gpio_range, range);
-       end = range->pin_base + range->npins - 1;
-       if (pin < range->pin_base || pin > end) {
-               dev_err(pctldev->dev,
-                       "pin %d isn't in the range of %d to %d\n",
-                       pin, range->pin_base, end);
-               return -EINVAL;
-       }
-       mux_bytes = pcs->width / BITS_PER_BYTE;
-       data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask;
-       data |= gpio->gpio_func;
-       pcs->write(data, pcs->base + pin * mux_bytes);
-       return 0;
+       return -ENOTSUPP;
 }
 
 static struct pinmux_ops pcs_pinmux_ops = {
@@ -907,49 +879,6 @@ static void pcs_free_resources(struct pcs_device *pcs)
 
 static struct of_device_id pcs_of_match[];
 
-static int pcs_add_gpio_range(struct device_node *node, struct pcs_device *pcs)
-{
-       struct pcs_gpio_range *gpio;
-       struct device_node *child;
-       struct resource r;
-       const char name[] = "pinctrl-single";
-       u32 gpiores[PCS_MAX_GPIO_VALUES];
-       int ret, i = 0, mux_bytes = 0;
-
-       for_each_child_of_node(node, child) {
-               ret = of_address_to_resource(child, 0, &r);
-               if (ret < 0)
-                       continue;
-               memset(gpiores, 0, sizeof(u32) * PCS_MAX_GPIO_VALUES);
-               ret = of_property_read_u32_array(child, "pinctrl-single,gpio",
-                                                gpiores, PCS_MAX_GPIO_VALUES);
-               if (ret < 0)
-                       continue;
-               gpio = devm_kzalloc(pcs->dev, sizeof(*gpio), GFP_KERNEL);
-               if (!gpio) {
-                       dev_err(pcs->dev, "failed to allocate pcs gpio\n");
-                       return -ENOMEM;
-               }
-               gpio->range.name = devm_kzalloc(pcs->dev, sizeof(name),
-                                               GFP_KERNEL);
-               if (!gpio->range.name) {
-                       dev_err(pcs->dev, "failed to allocate range name\n");
-                       return -ENOMEM;
-               }
-               memcpy((char *)gpio->range.name, name, sizeof(name));
-
-               gpio->range.id = i++;
-               gpio->range.base = gpiores[0];
-               gpio->gpio_func = gpiores[1];
-               mux_bytes = pcs->width / BITS_PER_BYTE;
-               gpio->range.pin_base = (r.start - pcs->res->start) / mux_bytes;
-               gpio->range.npins = (r.end - r.start) / mux_bytes + 1;
-
-               pinctrl_add_gpio_range(pcs->pctl, &gpio->range);
-       }
-       return 0;
-}
-
 static int pcs_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -1046,10 +975,6 @@ static int pcs_probe(struct platform_device *pdev)
                goto free;
        }
 
-       ret = pcs_add_gpio_range(np, pcs);
-       if (ret < 0)
-               goto free;
-
        dev_info(pcs->dev, "%i pins at pa %p size %u\n",
                 pcs->desc.npins, pcs->base, pcs->size);
 
index 498b2ba..d02498b 100644 (file)
@@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)
        return of_iomap(np, 0);
 }
 
+static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
+       const struct of_phandle_args *gpiospec,
+       u32 *flags)
+{
+       if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
+               return -EINVAL;
+
+       if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc)
+               return -EINVAL;
+
+       if (flags)
+               *flags = gpiospec->args[1];
+
+       return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE;
+}
+
 static int sirfsoc_pinmux_probe(struct platform_device *pdev)
 {
        int ret;
@@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)
                bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
                bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
                bank->chip.gc.of_node = np;
+               bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
+               bank->chip.gc.of_gpio_n_cells = 2;
                bank->chip.regs = regs;
                bank->id = i;
                bank->is_marco = is_marco;
index 7481146..97c2be1 100644 (file)
@@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {
        if (force)
                pr_warn("module loaded by force\n");
        /* first ensure that we are running on IBM HW */
-       else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
+       else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
                return -ENODEV;
 
        /* Get the address for the Extended BIOS Data Area */
index 71623a2..d1f0300 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/efi.h>
 #include <acpi/video.h>
 
 /*
@@ -1544,6 +1545,9 @@ static int __init samsung_init(void)
        struct samsung_laptop *samsung;
        int ret;
 
+       if (efi_enabled(EFI_BOOT))
+               return -ENODEV;
+
        quirks = &samsung_unknown;
        if (!force && !dmi_check_system(samsung_dmi_table))
                return -ENODEV;
index 261f3d2..89bd2fa 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include "dbx500-prcmu.h"
 
index b85040c..cca18a3 100644 (file)
@@ -379,9 +379,10 @@ static struct regulator_desc regulators[] = {
 };
 
 #ifdef CONFIG_OF
-static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max77686_platform_data *pdata)
 {
+       struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct device_node *pmic_np, *regulators_np;
        struct max77686_regulator_data *rdata;
        struct of_regulator_match rmatch;
@@ -390,15 +391,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
        pmic_np = iodev->dev->of_node;
        regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
        if (!regulators_np) {
-               dev_err(iodev->dev, "could not find regulators sub-node\n");
+               dev_err(&pdev->dev, "could not find regulators sub-node\n");
                return -EINVAL;
        }
 
        pdata->num_regulators = ARRAY_SIZE(regulators);
-       rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+       rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
                             pdata->num_regulators, GFP_KERNEL);
        if (!rdata) {
-               dev_err(iodev->dev,
+               dev_err(&pdev->dev,
                        "could not allocate memory for regulator data\n");
                return -ENOMEM;
        }
@@ -407,7 +408,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
                rmatch.name = regulators[i].name;
                rmatch.init_data = NULL;
                rmatch.of_node = NULL;
-               of_regulator_match(iodev->dev, regulators_np, &rmatch, 1);
+               of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);
                rdata[i].initdata = rmatch.init_data;
                rdata[i].of_node = rmatch.of_node;
        }
@@ -417,7 +418,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
        return 0;
 }
 #else
-static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max77686_platform_data *pdata)
 {
        return 0;
@@ -440,7 +441,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)
        }
 
        if (iodev->dev->of_node) {
-               ret = max77686_pmic_dt_parse_pdata(iodev, pdata);
+               ret = max77686_pmic_dt_parse_pdata(pdev, pdata);
                if (ret)
                        return ret;
        }
index d1a7751..d40cf7f 100644 (file)
@@ -237,8 +237,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       ret = of_regulator_match(pdev->dev.parent, regulators,
-                                max8907_matches,
+       ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,
                                 ARRAY_SIZE(max8907_matches));
        if (ret < 0) {
                dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
index 02be7fc..836908c 100644 (file)
@@ -934,7 +934,7 @@ static struct regulator_desc regulators[] = {
 };
 
 #ifdef CONFIG_OF
-static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
                        struct max8997_platform_data *pdata,
                        struct device_node *pmic_np)
 {
@@ -944,7 +944,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
                gpio = of_get_named_gpio(pmic_np,
                                        "max8997,pmic-buck125-dvs-gpios", i);
                if (!gpio_is_valid(gpio)) {
-                       dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+                       dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
                        return -EINVAL;
                }
                pdata->buck125_gpios[i] = gpio;
@@ -952,22 +952,23 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
        return 0;
 }
 
-static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max8997_platform_data *pdata)
 {
+       struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct device_node *pmic_np, *regulators_np, *reg_np;
        struct max8997_regulator_data *rdata;
        unsigned int i, dvs_voltage_nr = 1, ret;
 
        pmic_np = iodev->dev->of_node;
        if (!pmic_np) {
-               dev_err(iodev->dev, "could not find pmic sub-node\n");
+               dev_err(&pdev->dev, "could not find pmic sub-node\n");
                return -ENODEV;
        }
 
        regulators_np = of_find_node_by_name(pmic_np, "regulators");
        if (!regulators_np) {
-               dev_err(iodev->dev, "could not find regulators sub-node\n");
+               dev_err(&pdev->dev, "could not find regulators sub-node\n");
                return -EINVAL;
        }
 
@@ -976,11 +977,10 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
        for_each_child_of_node(regulators_np, reg_np)
                pdata->num_regulators++;
 
-       rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+       rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
                                pdata->num_regulators, GFP_KERNEL);
        if (!rdata) {
-               dev_err(iodev->dev, "could not allocate memory for "
-                                               "regulator data\n");
+               dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
                return -ENOMEM;
        }
 
@@ -991,14 +991,14 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
                                break;
 
                if (i == ARRAY_SIZE(regulators)) {
-                       dev_warn(iodev->dev, "don't know how to configure "
-                               "regulator %s\n", reg_np->name);
+                       dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
+                                reg_np->name);
                        continue;
                }
 
                rdata->id = i;
-               rdata->initdata = of_get_regulator_init_data(
-                                               iodev->dev, reg_np);
+               rdata->initdata = of_get_regulator_init_data(&pdev->dev,
+                                                            reg_np);
                rdata->reg_node = reg_np;
                rdata++;
        }
@@ -1014,7 +1014,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
 
        if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
                                                pdata->buck5_gpiodvs) {
-               ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
+               ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
                if (ret)
                        return -EINVAL;
 
@@ -1025,8 +1025,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
                } else {
                        if (pdata->buck125_default_idx >= 8) {
                                pdata->buck125_default_idx = 0;
-                               dev_info(iodev->dev, "invalid value for "
-                               "default dvs index, using 0 instead\n");
+                               dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
                        }
                }
 
@@ -1040,28 +1039,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
        if (of_property_read_u32_array(pmic_np,
                                "max8997,pmic-buck1-dvs-voltage",
                                pdata->buck1_voltage, dvs_voltage_nr)) {
-               dev_err(iodev->dev, "buck1 voltages not specified\n");
+               dev_err(&pdev->dev, "buck1 voltages not specified\n");
                return -EINVAL;
        }
 
        if (of_property_read_u32_array(pmic_np,
                                "max8997,pmic-buck2-dvs-voltage",
                                pdata->buck2_voltage, dvs_voltage_nr)) {
-               dev_err(iodev->dev, "buck2 voltages not specified\n");
+               dev_err(&pdev->dev, "buck2 voltages not specified\n");
                return -EINVAL;
        }
 
        if (of_property_read_u32_array(pmic_np,
                                "max8997,pmic-buck5-dvs-voltage",
                                pdata->buck5_voltage, dvs_voltage_nr)) {
-               dev_err(iodev->dev, "buck5 voltages not specified\n");
+               dev_err(&pdev->dev, "buck5 voltages not specified\n");
                return -EINVAL;
        }
 
        return 0;
 }
 #else
-static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max8997_platform_data *pdata)
 {
        return 0;
@@ -1085,7 +1084,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
        }
 
        if (iodev->dev->of_node) {
-               ret = max8997_pmic_dt_parse_pdata(iodev, pdata);
+               ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
                if (ret)
                        return ret;
        }
index 1f0df40..0a8dd1c 100644 (file)
@@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {
        .min = 2800000, .step = 100000, .max = 3100000,
 };
 static const struct voltage_map_desc ldo10_voltage_map_desc = {
-       .min = 95000,  .step = 50000,  .max = 1300000,
+       .min = 950000,  .step = 50000,  .max = 1300000,
 };
 static const struct voltage_map_desc ldo1213_voltage_map_desc = {
        .min = 800000,  .step = 100000, .max = 3300000,
index 6f68491..66ca769 100644 (file)
@@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,
        if (!dev || !node)
                return -EINVAL;
 
+       for (i = 0; i < num_matches; i++) {
+               struct of_regulator_match *match = &matches[i];
+               match->init_data = NULL;
+               match->of_node = NULL;
+       }
+
        for_each_child_of_node(node, child) {
                name = of_get_property(child,
                                        "regulator-compatible", NULL);
index bd062a2..cd9ea2e 100644 (file)
@@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {
        .min_uV         = S2MPS11_BUCK_MIN2,                    \
        .uV_step        = S2MPS11_BUCK_STEP2,                   \
        .n_voltages     = S2MPS11_BUCK_N_VOLTAGES,              \
-       .vsel_reg       = S2MPS11_REG_B9CTRL2,                  \
+       .vsel_reg       = S2MPS11_REG_B10CTRL2,                 \
        .vsel_mask      = S2MPS11_BUCK_VSEL_MASK,               \
-       .enable_reg     = S2MPS11_REG_B9CTRL1,                  \
+       .enable_reg     = S2MPS11_REG_B10CTRL1,                 \
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
index 73dce76..df39518 100644 (file)
@@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
        if (!regs)
                return NULL;
 
-       count = of_regulator_match(pdev->dev.parent, regs,
-                               reg_matches, TPS65217_NUM_REGULATOR);
+       count = of_regulator_match(&pdev->dev, regs, reg_matches,
+                                  TPS65217_NUM_REGULATOR);
        of_node_put(regs);
        if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
                return NULL;
index 59c3770..b0e4c0b 100644 (file)
@@ -998,7 +998,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
                return NULL;
        }
 
-       ret = of_regulator_match(pdev->dev.parent, regulators, matches, count);
+       ret = of_regulator_match(&pdev->dev, regulators, matches, count);
        if (ret < 0) {
                dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
                        ret);
index b15d711..9019d0e 100644 (file)
@@ -728,7 +728,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
                        }
                }
                rdev = regulator_register(&ri->rinfo->desc, &config);
-               if (IS_ERR_OR_NULL(rdev)) {
+               if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev,
                                "register regulator failed %s\n",
                                        ri->rinfo->desc.name);
index afb7cfa..c016ad8 100644 (file)
@@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)
 {
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        struct i2c_client *client = data;
+       struct rtc_device *rtc = i2c_get_clientdata(client);
        int handled = 0, sr, err;
 
        /*
@@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)
        if (sr & ISL1208_REG_SR_ALM) {
                dev_dbg(&client->dev, "alarm!\n");
 
+               rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
                /* Clear the alarm */
                sr &= ~ISL1208_REG_SR_ALM;
                sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
index 08378e3..81c5077 100644 (file)
@@ -44,6 +44,7 @@
 #define RTC_YMR                0x34    /* Year match register */
 #define RTC_YLR                0x38    /* Year data load register */
 
+#define RTC_CR_EN      (1 << 0)        /* counter enable bit */
 #define RTC_CR_CWEN    (1 << 26)       /* Clockwatch enable bit */
 
 #define RTC_TCR_EN     (1 << 1) /* Periodic timer enable bit */
@@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        struct pl031_local *ldata;
        struct pl031_vendor_data *vendor = id->data;
        struct rtc_class_ops *ops = &vendor->ops;
-       unsigned long time;
+       unsigned long time, data;
 
        ret = amba_request_regions(adev, NULL);
        if (ret)
@@ -345,10 +346,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
        dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));
 
+       data = readl(ldata->base + RTC_CR);
        /* Enable the clockwatch on ST Variants */
        if (vendor->clockwatch)
-               writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
-                      ldata->base + RTC_CR);
+               data |= RTC_CR_CWEN;
+       else
+               data |= RTC_CR_EN;
+       writel(data, ldata->base + RTC_CR);
 
        /*
         * On ST PL031 variants, the RTC reset value does not provide correct
index 00c930f..2730533 100644 (file)
@@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
-       writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
+       writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
                | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
                | (bin2bcd(tm->tm_mday))
                | ((tm->tm_year >= 200) << DATE_CENTURY_S),
index d73fdcf..2839baa 100644 (file)
@@ -633,7 +633,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENOMEM;
        pci_set_drvdata(pdev, pci_info);
 
-       if (efi_enabled)
+       if (efi_enabled(EFI_RUNTIME_SERVICES))
                orom = isci_get_efi_var(pdev);
 
        if (!orom)
index 97ac0a3..dc109de 100644 (file)
@@ -74,6 +74,16 @@ static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
        ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
 }
 
+static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+       struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+       if (bus->bustype == SSB_BUSTYPE_SSB)
+               return ssb_mips_irq(bus->chipco.dev) + 2;
+       else
+               return -EINVAL;
+}
+
 static int ssb_gpio_chipco_init(struct ssb_bus *bus)
 {
        struct gpio_chip *chip = &bus->gpio;
@@ -86,6 +96,7 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
        chip->set               = ssb_gpio_chipco_set_value;
        chip->direction_input   = ssb_gpio_chipco_direction_input;
        chip->direction_output  = ssb_gpio_chipco_direction_output;
+       chip->to_irq            = ssb_gpio_chipco_to_irq;
        chip->ngpio             = 16;
        /* There is just one SoC in one device and its GPIO addresses should be
         * deterministic to address them more easily. The other buses could get
@@ -134,6 +145,16 @@ static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
        return 0;
 }
 
+static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+       struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+       if (bus->bustype == SSB_BUSTYPE_SSB)
+               return ssb_mips_irq(bus->extif.dev) + 2;
+       else
+               return -EINVAL;
+}
+
 static int ssb_gpio_extif_init(struct ssb_bus *bus)
 {
        struct gpio_chip *chip = &bus->gpio;
@@ -144,6 +165,7 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
        chip->set               = ssb_gpio_extif_set_value;
        chip->direction_input   = ssb_gpio_extif_direction_input;
        chip->direction_output  = ssb_gpio_extif_direction_output;
+       chip->to_irq            = ssb_gpio_extif_to_irq;
        chip->ngpio             = 5;
        /* There is just one SoC in one device and its GPIO addresses should be
         * deterministic to address them more easily. The other buses could get
@@ -174,3 +196,15 @@ int ssb_gpio_init(struct ssb_bus *bus)
 
        return -1;
 }
+
+int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+       if (ssb_chipco_available(&bus->chipco) ||
+           ssb_extif_available(&bus->extif)) {
+               return gpiochip_remove(&bus->gpio);
+       } else {
+               SSB_WARN_ON(1);
+       }
+
+       return -1;
+}
index 2a7684c..33b37da 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/ssb/ssb.h>
 
+#include <linux/mtd/physmap.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 #include <linux/serial_reg.h>
 
 #include "ssb_private.h"
 
+static const char *part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data ssb_pflash_data = {
+       .part_probe_types       = part_probes,
+};
+
+static struct resource ssb_pflash_resource = {
+       .name   = "ssb_pflash",
+       .flags  = IORESOURCE_MEM,
+};
+
+struct platform_device ssb_pflash_dev = {
+       .name           = "physmap-flash",
+       .dev            = {
+               .platform_data  = &ssb_pflash_data,
+       },
+       .resource       = &ssb_pflash_resource,
+       .num_resources  = 1,
+};
 
 static inline u32 mips_read32(struct ssb_mipscore *mcore,
                              u16 offset)
@@ -189,14 +209,15 @@ static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
 static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
 {
        struct ssb_bus *bus = mcore->dev->bus;
+       struct ssb_pflash *pflash = &mcore->pflash;
 
        /* When there is no chipcommon on the bus there is 4MB flash */
        if (!ssb_chipco_available(&bus->chipco)) {
-               mcore->pflash.present = true;
-               mcore->pflash.buswidth = 2;
-               mcore->pflash.window = SSB_FLASH1;
-               mcore->pflash.window_size = SSB_FLASH1_SZ;
-               return;
+               pflash->present = true;
+               pflash->buswidth = 2;
+               pflash->window = SSB_FLASH1;
+               pflash->window_size = SSB_FLASH1_SZ;
+               goto ssb_pflash;
        }
 
        /* There is ChipCommon, so use it to read info about flash */
@@ -208,16 +229,23 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
                break;
        case SSB_CHIPCO_FLASHT_PARA:
                pr_debug("Found parallel flash\n");
-               mcore->pflash.present = true;
-               mcore->pflash.window = SSB_FLASH2;
-               mcore->pflash.window_size = SSB_FLASH2_SZ;
+               pflash->present = true;
+               pflash->window = SSB_FLASH2;
+               pflash->window_size = SSB_FLASH2_SZ;
                if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
                               & SSB_CHIPCO_CFG_DS16) == 0)
-                       mcore->pflash.buswidth = 1;
+                       pflash->buswidth = 1;
                else
-                       mcore->pflash.buswidth = 2;
+                       pflash->buswidth = 2;
                break;
        }
+
+ssb_pflash:
+       if (pflash->present) {
+               ssb_pflash_data.width = pflash->buswidth;
+               ssb_pflash_resource.start = pflash->window;
+               ssb_pflash_resource.end = pflash->window + pflash->window_size;
+       }
 }
 
 u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
index 772ad9b..3b645b8 100644 (file)
@@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
 
 void ssb_bus_unregister(struct ssb_bus *bus)
 {
+       int err;
+
+       err = ssb_gpio_unregister(bus);
+       if (err == -EBUSY)
+               ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+       else if (err)
+               ssb_dprintk(KERN_ERR PFX
+                           "Can not unregister GPIO driver: %i\n", err);
+
        ssb_buses_lock();
        ssb_devices_unregister(bus);
        list_del(&bus->list);
@@ -540,6 +549,14 @@ static int ssb_devices_register(struct ssb_bus *bus)
                dev_idx++;
        }
 
+#ifdef CONFIG_SSB_DRIVER_MIPS
+       if (bus->mipscore.pflash.present) {
+               err = platform_device_register(&ssb_pflash_dev);
+               if (err)
+                       pr_err("Error registering parallel flash\n");
+       }
+#endif
+
        return 0;
 error:
        /* Unwind the already registered devices. */
index 77d9426..466171b 100644 (file)
@@ -228,6 +228,10 @@ static inline int ssb_sflash_init(struct ssb_chipcommon *cc)
 }
 #endif /* CONFIG_SSB_SFLASH */
 
+#ifdef CONFIG_SSB_DRIVER_MIPS
+extern struct platform_device ssb_pflash_dev;
+#endif
+
 #ifdef CONFIG_SSB_DRIVER_EXTIF
 extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks);
 extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
@@ -263,11 +267,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif)
 
 #ifdef CONFIG_SSB_DRIVER_GPIO
 extern int ssb_gpio_init(struct ssb_bus *bus);
+extern int ssb_gpio_unregister(struct ssb_bus *bus);
 #else /* CONFIG_SSB_DRIVER_GPIO */
 static inline int ssb_gpio_init(struct ssb_bus *bus)
 {
        return -ENOTSUPP;
 }
+static inline int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+       return 0;
+}
 #endif /* CONFIG_SSB_DRIVER_GPIO */
 
 #endif /* LINUX_SSB_PRIVATE_H_ */
index 1d31eab..f1bce18 100644 (file)
@@ -424,7 +424,7 @@ int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
                        goto exit;
                }
 
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wiphy, bss);
        }
 
        if (result)
index e269510..f2aa754 100644 (file)
@@ -941,6 +941,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
+       int block_size = dev->dev_attrib.block_size;
+
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " fabric_max_sectors while export_count is %d\n",
@@ -978,8 +980,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
        /*
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
         */
+       if (!block_size) {
+               block_size = 512;
+               pr_warn("Defaulting to 512 for zero block_size\n");
+       }
        fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     dev->dev_attrib.block_size);
+                                                     block_size);
 
        dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
index 810263d..c57bbbc 100644 (file)
@@ -754,6 +754,11 @@ static int target_fabric_port_link(
                return -EFAULT;
        }
 
+       if (!(dev->dev_flags & DF_CONFIGURED)) {
+               pr_err("se_device not configured yet, cannot port link\n");
+               return -ENODEV;
+       }
+
        tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
        se_tpg = container_of(to_config_group(tpg_ci),
                                struct se_portal_group, tpg_group);
index 26a6d18..a664c66 100644 (file)
@@ -58,11 +58,10 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
        buf[7] = dev->dev_attrib.block_size & 0xff;
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (!rbuf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd(cmd, GOOD);
        return 0;
@@ -97,11 +96,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
                buf[14] = 0x80;
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (!rbuf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd(cmd, GOOD);
        return 0;
index 84f9e96..2d88f08 100644 (file)
@@ -641,11 +641,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 
 out:
        rbuf = transport_kmap_data_sg(cmd);
-       if (!rbuf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        if (!ret)
                target_complete_cmd(cmd, GOOD);
@@ -851,7 +850,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        char *cdb = cmd->t_task_cdb;
-       unsigned char *buf, *map_buf;
+       unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
        int type = dev->transport->get_device_type(dev);
        int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
        bool dbd = !!(cdb[1] & 0x08);
@@ -863,26 +862,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        int ret;
        int i;
 
-       map_buf = transport_kmap_data_sg(cmd);
-       if (!map_buf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       /*
-        * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
-        * know we actually allocated a full page.  Otherwise, if the
-        * data buffer is too small, allocate a temporary buffer so we
-        * don't have to worry about overruns in all our INQUIRY
-        * emulation handling.
-        */
-       if (cmd->data_length < SE_MODE_PAGE_BUF &&
-           (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
-               buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
-               if (!buf) {
-                       transport_kunmap_data_sg(cmd);
-                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               }
-       } else {
-               buf = map_buf;
-       }
+       memset(buf, 0, SE_MODE_PAGE_BUF);
+
        /*
         * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
         * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
@@ -934,8 +915,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        if (page == 0x3f) {
                if (subpage != 0x00 && subpage != 0xff) {
                        pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
-                       kfree(buf);
-                       transport_kunmap_data_sg(cmd);
                        return TCM_INVALID_CDB_FIELD;
                }
 
@@ -972,7 +951,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       page, subpage);
 
-       transport_kunmap_data_sg(cmd);
        return TCM_UNKNOWN_MODE_PAGE;
 
 set_length:
@@ -981,12 +959,12 @@ set_length:
        else
                buf[0] = length - 1;
 
-       if (buf != map_buf) {
-               memcpy(map_buf, buf, cmd->data_length);
-               kfree(buf);
+       rbuf = transport_kmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
+               transport_kunmap_data_sg(cmd);
        }
 
-       transport_kunmap_data_sg(cmd);
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
index 4225d5e..8e64adf 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/unaligned.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
@@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd)
        return retval;
 }
 
+/*
+ * usb_hcd_start_port_resume - a root-hub port is sending a resume signal
+ * @bus: the bus which the root hub belongs to
+ * @portnum: the port which is being resumed
+ *
+ * HCDs should call this function when they know that a resume signal is
+ * being sent to a root-hub port.  The root hub will be prevented from
+ * going into autosuspend until usb_hcd_end_port_resume() is called.
+ *
+ * The bus's private lock must be held by the caller.
+ */
+void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
+{
+       unsigned bit = 1 << portnum;
+
+       if (!(bus->resuming_ports & bit)) {
+               bus->resuming_ports |= bit;
+               pm_runtime_get_noresume(&bus->root_hub->dev);
+       }
+}
+EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume);
+
+/*
+ * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal
+ * @bus: the bus which the root hub belongs to
+ * @portnum: the port which is being resumed
+ *
+ * HCDs should call this function when they know that a resume signal has
+ * stopped being sent to a root-hub port.  The root hub will be allowed to
+ * autosuspend again.
+ *
+ * The bus's private lock must be held by the caller.
+ */
+void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum)
+{
+       unsigned bit = 1 << portnum;
+
+       if (bus->resuming_ports & bit) {
+               bus->resuming_ports &= ~bit;
+               pm_runtime_put_noidle(&bus->root_hub->dev);
+       }
+}
+EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
 
 /*-------------------------------------------------------------------------*/
 
index 957ed2c..cbf7168 100644 (file)
@@ -2838,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev)
 EXPORT_SYMBOL_GPL(usb_enable_ltm);
 
 #ifdef CONFIG_USB_SUSPEND
+/*
+ * usb_disable_function_remotewakeup - disable usb3.0
+ * device's function remote wakeup
+ * @udev: target device
+ *
+ * Assume there's only one function on the USB 3.0
+ * device and disable remote wake for the first
+ * interface. FIXME if the interface association
+ * descriptor shows there's more than one function.
+ */
+static int usb_disable_function_remotewakeup(struct usb_device *udev)
+{
+       return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                               USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
+                               USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
+                               USB_CTRL_SET_TIMEOUT);
+}
 
 /*
  * usb_port_suspend - suspend a usb device's upstream port
@@ -2955,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
                dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
                                port1, status);
                /* paranoia:  "should not happen" */
-               if (udev->do_remote_wakeup)
-                       (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
-                               USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
-                               USB_DEVICE_REMOTE_WAKEUP, 0,
-                               NULL, 0,
-                               USB_CTRL_SET_TIMEOUT);
+               if (udev->do_remote_wakeup) {
+                       if (!hub_is_superspeed(hub->hdev)) {
+                               (void) usb_control_msg(udev,
+                                               usb_sndctrlpipe(udev, 0),
+                                               USB_REQ_CLEAR_FEATURE,
+                                               USB_RECIP_DEVICE,
+                                               USB_DEVICE_REMOTE_WAKEUP, 0,
+                                               NULL, 0,
+                                               USB_CTRL_SET_TIMEOUT);
+                       } else
+                               (void) usb_disable_function_remotewakeup(udev);
+
+               }
 
                /* Try to enable USB2 hardware LPM again */
                if (udev->usb2_hw_lpm_capable == 1)
@@ -3052,20 +3076,30 @@ static int finish_port_resume(struct usb_device *udev)
         * udev->reset_resume
         */
        } else if (udev->actconfig && !udev->reset_resume) {
-               le16_to_cpus(&devstatus);
-               if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
-                       status = usb_control_msg(udev,
-                                       usb_sndctrlpipe(udev, 0),
-                                       USB_REQ_CLEAR_FEATURE,
+               if (!hub_is_superspeed(udev->parent)) {
+                       le16_to_cpus(&devstatus);
+                       if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
+                               status = usb_control_msg(udev,
+                                               usb_sndctrlpipe(udev, 0),
+                                               USB_REQ_CLEAR_FEATURE,
                                                USB_RECIP_DEVICE,
-                                       USB_DEVICE_REMOTE_WAKEUP, 0,
-                                       NULL, 0,
-                                       USB_CTRL_SET_TIMEOUT);
-                       if (status)
-                               dev_dbg(&udev->dev,
-                                       "disable remote wakeup, status %d\n",
-                                       status);
+                                               USB_DEVICE_REMOTE_WAKEUP, 0,
+                                               NULL, 0,
+                                               USB_CTRL_SET_TIMEOUT);
+               } else {
+                       status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
+                                       &devstatus);
+                       le16_to_cpus(&devstatus);
+                       if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
+                                       | USB_INTRF_STAT_FUNC_RW))
+                               status =
+                                       usb_disable_function_remotewakeup(udev);
                }
+
+               if (status)
+                       dev_dbg(&udev->dev,
+                               "disable remote wakeup, status %d\n",
+                               status);
                status = 0;
        }
        return status;
index 09537b2..b416a3f 100644 (file)
@@ -797,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                        ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
                        set_bit(i, &ehci->resuming_ports);
                        ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+                       usb_hcd_start_port_resume(&hcd->self, i);
                        mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
                }
        }
index 4ccb97c..4d3b294 100644 (file)
@@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
                        status = STS_PCD;
                }
        }
-       /* FIXME autosuspend idle root hubs */
+
+       /* If a resume is in progress, make sure it can finish */
+       if (ehci->resuming_ports)
+               mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
        spin_unlock_irqrestore (&ehci->lock, flags);
        return status ? retval : 0;
 }
@@ -851,6 +855,7 @@ static int ehci_hub_control (
                                /* resume signaling for 20 msec */
                                ehci->reset_done[wIndex] = jiffies
                                                + msecs_to_jiffies(20);
+                               usb_hcd_start_port_resume(&hcd->self, wIndex);
                                /* check the port again */
                                mod_timer(&ehci_to_hcd(ehci)->rh_timer,
                                                ehci->reset_done[wIndex]);
@@ -862,6 +867,7 @@ static int ehci_hub_control (
                                clear_bit(wIndex, &ehci->suspended_ports);
                                set_bit(wIndex, &ehci->port_c_suspend);
                                ehci->reset_done[wIndex] = 0;
+                               usb_hcd_end_port_resume(&hcd->self, wIndex);
 
                                /* stop resume signaling */
                                temp = ehci_readl(ehci, status_reg);
@@ -950,6 +956,7 @@ static int ehci_hub_control (
                        ehci->reset_done[wIndex] = 0;
                        if (temp & PORT_PE)
                                set_bit(wIndex, &ehci->port_c_suspend);
+                       usb_hcd_end_port_resume(&hcd->self, wIndex);
                }
 
                if (temp & PORT_OC)
index 3d98902..fd252f0 100644 (file)
@@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
        if (ehci->async_iaa || ehci->async_unlinking)
                return;
 
-       /* Do all the waiting QHs at once */
-       ehci->async_iaa = ehci->async_unlink;
-       ehci->async_unlink = NULL;
-
        /* If the controller isn't running, we don't have to wait for it */
        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+
+               /* Do all the waiting QHs */
+               ehci->async_iaa = ehci->async_unlink;
+               ehci->async_unlink = NULL;
+
                if (!nested)            /* Avoid recursion */
                        end_unlink_async(ehci);
 
        /* Otherwise start a new IAA cycle */
        } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+               struct ehci_qh          *qh;
+
+               /* Do only the first waiting QH (nVidia bug?) */
+               qh = ehci->async_unlink;
+               ehci->async_iaa = qh;
+               ehci->async_unlink = qh->unlink_next;
+               qh->unlink_next = NULL;
+
                /* Make sure the unlinks are all visible to the hardware */
                wmb();
 
@@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)
        }
 }
 
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
 static void unlink_empty_async(struct ehci_hcd *ehci)
 {
-       struct ehci_qh          *qh, *next;
-       bool                    stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+       struct ehci_qh          *qh;
+       struct ehci_qh          *qh_to_unlink = NULL;
        bool                    check_unlinks_later = false;
+       int                     count = 0;
 
-       /* Unlink all the async QHs that have been empty for a timer cycle */
-       next = ehci->async->qh_next.qh;
-       while (next) {
-               qh = next;
-               next = qh->qh_next.qh;
-
+       /* Find the last async QH which has been empty for a timer cycle */
+       for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
                if (list_empty(&qh->qtd_list) &&
                                qh->qh_state == QH_STATE_LINKED) {
-                       if (!stopped && qh->unlink_cycle ==
-                                       ehci->async_unlink_cycle)
+                       ++count;
+                       if (qh->unlink_cycle == ehci->async_unlink_cycle)
                                check_unlinks_later = true;
                        else
-                               single_unlink_async(ehci, qh);
+                               qh_to_unlink = qh;
                }
        }
 
-       /* Start a new IAA cycle if any QHs are waiting for it */
-       if (ehci->async_unlink)
-               start_iaa_cycle(ehci, false);
+       /* If nothing else is being unlinked, unlink the last empty QH */
+       if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
+               start_unlink_async(ehci, qh_to_unlink);
+               --count;
+       }
 
-       /* QHs that haven't been empty for long enough will be handled later */
-       if (check_unlinks_later) {
+       /* Other QHs will be handled later */
+       if (count > 0) {
                ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
                ++ehci->async_unlink_cycle;
        }
index 69ebee7..b476daf 100644 (file)
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 }
 
 static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
 
 /* carryover low/fullspeed bandwidth that crosses uframe boundries */
 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci)
        }
        ehci->now_frame = now_frame;
 
+       frame = ehci->last_iso_frame;
        for (;;) {
                union ehci_shadow       q, *q_p;
                __hc32                  type, *hw_p;
 
-               frame = ehci->last_iso_frame;
 restart:
                /* scan each element in frame's queue for completions */
                q_p = &ehci->pshadow [frame];
@@ -2321,6 +2321,9 @@ restart:
                /* Stop when we have reached the current frame */
                if (frame == now_frame)
                        break;
-               ehci->last_iso_frame = (frame + 1) & fmask;
+
+               /* The last frame may still have active siTDs */
+               ehci->last_iso_frame = frame;
+               frame = (frame + 1) & fmask;
        }
 }
index 20dbdcb..f904071 100644 (file)
@@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)
 
        if (want != actual) {
 
-               /* Poll again later, but give up after about 20 ms */
-               if (ehci->ASS_poll_count++ < 20) {
-                       ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
-                       return;
-               }
-               ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
-                               want, actual);
+               /* Poll again later */
+               ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+               ++ehci->ASS_poll_count;
+               return;
        }
+
+       if (ehci->ASS_poll_count > 20)
+               ehci_dbg(ehci, "ASS poll count reached %d\n",
+                               ehci->ASS_poll_count);
        ehci->ASS_poll_count = 0;
 
        /* The status is up-to-date; restart or stop the schedule as needed */
@@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)
 
        if (want != actual) {
 
-               /* Poll again later, but give up after about 20 ms */
-               if (ehci->PSS_poll_count++ < 20) {
-                       ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
-                       return;
-               }
-               ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
-                               want, actual);
+               /* Poll again later */
+               ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+               return;
        }
+
+       if (ehci->PSS_poll_count > 20)
+               ehci_dbg(ehci, "PSS poll count reached %d\n",
+                               ehci->PSS_poll_count);
        ehci->PSS_poll_count = 0;
 
        /* The status is up-to-date; restart or stop the schedule as needed */
index a3b6d71..4c338ec 100644 (file)
@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
                                "defaulting to EHCI.\n");
                dev_warn(&xhci_pdev->dev,
                                "USB 3.0 devices will work at USB 2.0 speeds.\n");
+               usb_disable_xhci_ports(xhci_pdev);
                return;
        }
 
index 768d542..15d1322 100644 (file)
@@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
                }
        }
        clear_bit(port, &uhci->resuming_ports);
+       usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
 }
 
 /* Wait for the UHCI controller in HP's iLO2 server management chip.
@@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
                                set_bit(port, &uhci->resuming_ports);
                                uhci->ports_timeout = jiffies +
                                                msecs_to_jiffies(25);
+                               usb_hcd_start_port_resume(
+                                               &uhci_to_hcd(uhci)->self, port);
 
                                /* Make sure we see the port again
                                 * after the resuming period is over. */
index 59fb5c6..7f76a49 100644 (file)
@@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                                faked_port_index + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci_ring_device(xhci, slot_id);
-               if (bus_state->port_remote_wakeup && (1 << faked_port_index)) {
+               if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
                        bus_state->port_remote_wakeup &=
                                ~(1 << faked_port_index);
                        xhci_test_and_clear_bit(xhci, port_array,
@@ -2589,6 +2589,8 @@ cleanup:
                                (trb_comp_code != COMP_STALL &&
                                        trb_comp_code != COMP_BABBLE))
                                xhci_urb_free_priv(xhci, urb_priv);
+                       else
+                               kfree(urb_priv);
 
                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
                        if ((urb->actual_length != urb->transfer_buffer_length &&
@@ -3108,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
         * running_total.
         */
        packets_transferred = (running_total + trb_buff_len) /
-               usb_endpoint_maxp(&urb->ep->desc);
+               GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
 
        if ((total_packet_count - packets_transferred) > 31)
                return 31 << 17;
@@ -3642,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                td_len = urb->iso_frame_desc[i].length;
                td_remain_len = td_len;
                total_packet_count = DIV_ROUND_UP(td_len,
-                               usb_endpoint_maxp(&urb->ep->desc));
+                               GET_MAX_PACKET(
+                                       usb_endpoint_maxp(&urb->ep->desc)));
                /* A zero-length transfer still involves at least one packet. */
                if (total_packet_count == 0)
                        total_packet_count++;
@@ -3664,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                td = urb_priv->td[i];
                for (j = 0; j < trbs_per_td; j++) {
                        u32 remainder = 0;
-                       field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
+                       field = 0;
 
                        if (first_trb) {
+                               field = TRB_TBC(burst_count) |
+                                       TRB_TLBPC(residue);
                                /* Queue the isoc TRB */
                                field |= TRB_TYPE(TRB_ISOC);
                                /* Assume URB_ISO_ASAP is set */
index f14736f..edc0f0d 100644 (file)
@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
        { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
        { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
+       { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
        { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
        { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
        { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
index ba68835..90ceef1 100644 (file)
@@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = {
        /*
         * ELV devices:
         */
+       { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
@@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
        { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
index fa5d560..9d359e1 100644 (file)
 #define XSENS_CONVERTER_6_PID  0xD38E
 #define XSENS_CONVERTER_7_PID  0xD38F
 
+/**
+ * Zolix (www.zolix.com.cb) product ids
+ */
+#define FTDI_OMNI1509                  0xD491  /* Omni1509 embedded USB-serial */
+
 /*
  * NDI (www.ndigital.com) product ids
  */
 
 /*
  * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
+ * Almost all of these devices use FTDI's vendor ID (0x0403).
  * Further IDs taken from ELV Windows .inf file.
  *
  * The previously included PID for the UO 100 module was incorrect.
  *
  * Armin Laeuger originally sent the PID for the UM 100 module.
  */
+#define FTDI_ELV_VID   0x1B1F  /* ELV AG */
+#define FTDI_ELV_WS300_PID     0xC006  /* eQ3 WS 300 PC II */
 #define FTDI_ELV_USR_PID       0xE000  /* ELV Universal-Sound-Recorder */
 #define FTDI_ELV_MSM1_PID      0xE001  /* ELV Mini-Sound-Modul */
 #define FTDI_ELV_KL100_PID     0xE002  /* ELV Kfz-Leistungsmesser KL 100 */
index 0d9dac9..567bc77 100644 (file)
@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_CC864_DUAL               0x1005
 #define TELIT_PRODUCT_CC864_SINGLE             0x1006
 #define TELIT_PRODUCT_DE910_DUAL               0x1010
+#define TELIT_PRODUCT_LE920                    0x1200
 
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID                          0x19d2
@@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);
 #define TPLINK_VENDOR_ID                       0x2357
 #define TPLINK_PRODUCT_MA180                   0x0201
 
+/* Changhong products */
+#define CHANGHONG_VENDOR_ID                    0x2077
+#define CHANGHONG_PRODUCT_CH690                        0x7001
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
        .reserved = BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info telit_le920_blacklist = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1) | BIT(5),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+               .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
        { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index aa148c2..2466254 100644 (file)
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_G1K(0x05c6, 0x9221)},   /* Generic Gobi QDL device */
        {DEVICE_G1K(0x05c6, 0x9231)},   /* Generic Gobi QDL device */
        {DEVICE_G1K(0x1f45, 0x0001)},   /* Unknown Gobi QDL device */
+       {DEVICE_G1K(0x1bc7, 0x900e)},   /* Telit Gobi QDL device */
 
        /* Gobi 2000 devices */
        {USB_DEVICE(0x1410, 0xa010)},   /* Novatel Gobi 2000 QDL device */
index 105d900..16b0bf0 100644 (file)
@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
        return 0;
 }
 
-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us)
+/* This places the HUAWEI usb dongles in multi-port mode */
+static int usb_stor_huawei_feature_init(struct us_data *us)
 {
        int result;
 
@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
        US_DEBUGP("Huawei mode set result is %d\n", result);
        return 0;
 }
+
+/*
+ * It will send a scsi switch command called rewind' to huawei dongle.
+ * When the dongle receives this command at the first time,
+ * it will reboot immediately. After rebooted, it will ignore this command.
+ * So it is  unnecessary to read its response.
+ */
+static int usb_stor_huawei_scsi_init(struct us_data *us)
+{
+       int result = 0;
+       int act_len = 0;
+       struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+       char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+                       0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+       bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+       bcbw->Tag = 0;
+       bcbw->DataTransferLength = 0;
+       bcbw->Flags = bcbw->Lun = 0;
+       bcbw->Length = sizeof(rewind_cmd);
+       memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+       memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+
+       result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+                                       US_BULK_CB_WRAP_LEN, &act_len);
+       US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+       return result;
+}
+
+/*
+ * It tries to find the supported Huawei USB dongles.
+ * In Huawei, they assign the following product IDs
+ * for all of their mobile broadband dongles,
+ * including the new dongles in the future.
+ * So if the product ID is not included in this list,
+ * it means it is not Huawei's mobile broadband dongles.
+ */
+static int usb_stor_huawei_dongles_pid(struct us_data *us)
+{
+       struct usb_interface_descriptor *idesc;
+       int idProduct;
+
+       idesc = &us->pusb_intf->cur_altsetting->desc;
+       idProduct = us->pusb_dev->descriptor.idProduct;
+       /* The first port is CDROM,
+        * means the dongle in the single port mode,
+        * and a switch command is required to be sent. */
+       if (idesc && idesc->bInterfaceNumber == 0) {
+               if ((idProduct == 0x1001)
+                       || (idProduct == 0x1003)
+                       || (idProduct == 0x1004)
+                       || (idProduct >= 0x1401 && idProduct <= 0x1500)
+                       || (idProduct >= 0x1505 && idProduct <= 0x1600)
+                       || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+int usb_stor_huawei_init(struct us_data *us)
+{
+       int result = 0;
+
+       if (usb_stor_huawei_dongles_pid(us)) {
+               if (us->pusb_dev->descriptor.idProduct >= 0x1446)
+                       result = usb_stor_huawei_scsi_init(us);
+               else
+                       result = usb_stor_huawei_feature_init(us);
+       }
+       return result;
+}
index 529327f..5376d4f 100644 (file)
@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
  * flash reader */
 int usb_stor_ucr61s2b_init(struct us_data *us);
 
-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us);
+/* This places the HUAWEI usb dongles in multi-port mode */
+int usb_stor_huawei_init(struct us_data *us);
index d305a5a..72923b5 100644 (file)
@@ -1527,335 +1527,10 @@ UNUSUAL_DEV(  0x1210, 0x0003, 0x0100, 0x0100,
 /* Reported by fangxiaozhi <huananhu@huawei.com>
  * This brings the HUAWEI data card devices into multi-port mode
  */
-UNUSUAL_DEV(  0x12d1, 0x1001, 0x0000, 0x0000,
+UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
                "HUAWEI MOBILE",
                "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1003, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1004, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1401, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1402, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1403, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1404, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1405, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1406, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1407, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1408, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1409, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1410, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1411, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1412, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1413, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1414, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1415, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1416, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1417, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1418, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1419, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1420, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1421, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1422, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1423, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1424, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1425, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1426, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1427, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1428, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1429, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1430, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1431, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1432, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1433, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1434, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1435, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1436, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1437, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1438, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1439, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
                0),
 
 /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
index 31b3e1a..cf09b6b 100644 (file)
@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
        .useTransport = use_transport,  \
 }
 
+#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
+               vendor_name, product_name, use_protocol, use_transport, \
+               init_function, Flags) \
+{ \
+       .vendorName = vendor_name,      \
+       .productName = product_name,    \
+       .useProtocol = use_protocol,    \
+       .useTransport = use_transport,  \
+       .initFunction = init_function,  \
+}
+
 static struct us_unusual_dev us_unusual_dev_list[] = {
 #      include "unusual_devs.h"
        { }             /* Terminating entry */
@@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =
 #undef UNUSUAL_DEV
 #undef COMPLIANT_DEV
 #undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF
 
 #ifdef CONFIG_LOCKDEP
 
index b78a526..5ef8ce7 100644 (file)
 #define USUAL_DEV(useProto, useTrans) \
 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }
 
+/* Define the device is matched with Vendor ID and interface descriptors */
+#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
+                       vendorName, productName, useProtocol, useTransport, \
+                       initFunction, flags) \
+{ \
+       .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+                               | USB_DEVICE_ID_MATCH_VENDOR, \
+       .idVendor    = (id_vendor), \
+       .bInterfaceClass = (cl), \
+       .bInterfaceSubClass = (sc), \
+       .bInterfaceProtocol = (pr), \
+       .driver_info = (flags) \
+}
+
 struct usb_device_id usb_storage_usb_ids[] = {
 #      include "unusual_devs.h"
        { }             /* Terminating entry */
@@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
 #undef UNUSUAL_DEV
 #undef COMPLIANT_DEV
 #undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF
 
 /*
  * The table of devices to ignore
index ebd08b2..959b1cd 100644 (file)
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
 }
 
 /* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
 {
+       int ret;
+
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
-               return;
-       vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
-       net->tx_poll_state = VHOST_NET_POLL_STARTED;
+               return 0;
+       ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+       if (!ret)
+               net->tx_poll_state = VHOST_NET_POLL_STARTED;
+       return ret;
 }
 
 /* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
 }
 
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
 {
        struct socket *sock;
+       int ret;
 
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
-               return;
+               return 0;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
-               tx_poll_start(n, sock);
+               ret = tx_poll_start(n, sock);
        } else
-               vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+               ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+       return ret;
 }
 
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }
-               oldubufs = vq->ubufs;
-               vq->ubufs = ubufs;
+
                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
-               vhost_net_enable_vq(n, vq);
-
                r = vhost_init_used(vq);
                if (r)
-                       goto err_vq;
+                       goto err_used;
+               r = vhost_net_enable_vq(n, vq);
+               if (r)
+                       goto err_used;
+
+               oldubufs = vq->ubufs;
+               vq->ubufs = ubufs;
 
                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        mutex_unlock(&n->dev.mutex);
        return 0;
 
+err_used:
+       rcu_assign_pointer(vq->private_data, oldsock);
+       vhost_net_enable_vq(n, vq);
+       if (ubufs)
+               vhost_ubuf_put_and_wait(ubufs);
 err_ubufs:
        fput(sock->file);
 err_vq:
index b20df5c..22321cf 100644 (file)
@@ -575,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
 
        /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
        tv_tpg = vs->vs_tpg;
-       if (unlikely(!tv_tpg)) {
-               pr_err("%s endpoint not set\n", __func__);
+       if (unlikely(!tv_tpg))
                return;
-       }
 
        mutex_lock(&vq->mutex);
        vhost_disable_notify(&vs->dev, vq);
index 34389f7..9759249 100644 (file)
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
+       poll->wqh = NULL;
 
        vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
        unsigned long mask;
+       int ret = 0;
 
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+       if (mask & POLLERR) {
+               if (poll->wqh)
+                       remove_wait_queue(poll->wqh, &poll->wait);
+               ret = -EINVAL;
+       }
+
+       return ret;
 }
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
-       remove_wait_queue(poll->wqh, &poll->wait);
+       if (poll->wqh) {
+               remove_wait_queue(poll->wqh, &poll->wait);
+               poll->wqh = NULL;
+       }
 }
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                fput(filep);
 
        if (pollstart && vq->handle_kick)
-               vhost_poll_start(&vq->poll, vq->kick);
+               r = vhost_poll_start(&vq->poll, vq->kick);
 
        mutex_unlock(&vq->mutex);
 
index 2639c58..17261e2 100644 (file)
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
index 0be4df3..74d77df 100644 (file)
@@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 
        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
-               if (irq == -1)
+               if (irq < 0)
                        goto out;
 
                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
@@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
-               if (irq == -1)
+               if (irq < 0)
                        goto out;
 
                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
index 97f5d26..37c1f82 100644 (file)
@@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
                         struct pci_dev *dev, struct xen_pci_op *op)
 {
        struct xen_pcibk_dev_data *dev_data;
-       int otherend = pdev->xdev->otherend_id;
        int status;
 
        if (unlikely(verbose_request))
@@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
        status = pci_enable_msi(dev);
 
        if (status) {
-               printk(KERN_ERR "error enable msi for guest %x status %x\n",
-                       otherend, status);
+               pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
+                                   pci_name(dev), pdev->xdev->otherend_id,
+                                   status);
                op->value = 0;
                return XEN_PCI_ERR_op_failed;
        }
@@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
                                                pci_name(dev), i,
                                                op->msix_entries[i].vector);
                }
-       } else {
-               printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
-                       pci_name(dev), result);
-       }
+       } else
+               pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
+                                   pci_name(dev), pdev->xdev->otherend_id,
+                                   result);
        kfree(entries);
 
        op->value = result;
index a8b8adc..5a3327b 100644 (file)
@@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        unsigned nr_extents = 0;
        int extra_reserve = 0;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
-       int ret;
+       int ret = 0;
        bool delalloc_lock = true;
 
        /* If we are a free space inode we need to not flush since we will be in
@@ -4579,20 +4579,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
-       if (root->fs_info->quota_enabled) {
+       if (root->fs_info->quota_enabled)
                ret = btrfs_qgroup_reserve(root, num_bytes +
                                           nr_extents * root->leafsize);
-               if (ret) {
-                       spin_lock(&BTRFS_I(inode)->lock);
-                       calc_csum_metadata_size(inode, num_bytes, 0);
-                       spin_unlock(&BTRFS_I(inode)->lock);
-                       if (delalloc_lock)
-                               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
-                       return ret;
-               }
-       }
 
-       ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+       /*
+        * ret != 0 here means the qgroup reservation failed, we go straight to
+        * the shared error handling then.
+        */
+       if (ret == 0)
+               ret = reserve_metadata_bytes(root, block_rsv,
+                                            to_reserve, flush);
+
        if (ret) {
                u64 to_free = 0;
                unsigned dropped;
index 2e8cae6..fdb7a8d 100644 (file)
@@ -288,7 +288,8 @@ out:
 void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
 {
        clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
-       try_merge_map(tree, em);
+       if (em->in_tree)
+               try_merge_map(tree, em);
 }
 
 /**
index f76b1fd..aeb8446 100644 (file)
@@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
+       int index;
+       int ret;
 
        /* get the inode */
        key.objectid = defrag->root;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
        key.offset = (u64)-1;
+
+       index = srcu_read_lock(&fs_info->subvol_srcu);
+
        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
-               kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-               return PTR_ERR(inode_root);
+               ret = PTR_ERR(inode_root);
+               goto cleanup;
+       }
+       if (btrfs_root_refs(&inode_root->root_item) == 0) {
+               ret = -ENOENT;
+               goto cleanup;
        }
 
        key.objectid = defrag->ino;
@@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
-               kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-               return PTR_ERR(inode);
+               ret = PTR_ERR(inode);
+               goto cleanup;
        }
+       srcu_read_unlock(&fs_info->subvol_srcu, index);
 
        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
@@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 
        iput(inode);
        return 0;
+cleanup:
+       srcu_read_unlock(&fs_info->subvol_srcu, index);
+       kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+       return ret;
 }
 
 /*
@@ -1594,9 +1608,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                if (err < 0 && num_written > 0)
                        num_written = err;
        }
-out:
+
        if (sync)
                atomic_dec(&BTRFS_I(inode)->sync_writers);
+out:
        sb_end_write(inode->i_sb);
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
index 5b22d45..338f259 100644 (file)
@@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root,
 
        BUG_ON(ret);
 
-       d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
 fail:
        if (async_transid) {
                *async_transid = trans->transid;
@@ -525,6 +524,10 @@ fail:
        }
        if (err && !ret)
                ret = err;
+
+       if (!ret)
+               d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
+
        return ret;
 }
 
index f107312..e5ed567 100644 (file)
@@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
-       if (disk_i_size == i_size || offset <= disk_i_size) {
+       if (disk_i_size == i_size)
+               goto out;
+
+       /*
+        * We still need to update disk_i_size if outstanding_isize is greater
+        * than disk_i_size.
+        */
+       if (offset <= disk_i_size &&
+           (!ordered || ordered->outstanding_isize <= disk_i_size))
                goto out;
-       }
 
        /*
         * walk backward from this ordered extent to disk_i_size.
@@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                        break;
                if (test->file_offset >= i_size)
                        break;
-               if (test->file_offset >= disk_i_size) {
+               if (entry_end(test) > disk_i_size) {
                        /*
                         * we don't update disk_i_size now, so record this
                         * undealt i_size. Or we will not know the real
index bdbb94f..67783e0 100644 (file)
@@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
+       struct btrfs_fs_info *fs_info;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;
+       int srcu_index;
 
        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
-       local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
-       if (IS_ERR(local_root))
+
+       fs_info = fixup->root->fs_info;
+       srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
+
+       local_root = btrfs_read_fs_root_no_name(fs_info, &key);
+       if (IS_ERR(local_root)) {
+               srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
+       }
 
        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
-       inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
+       inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
+       srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
@@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
        }
 
        if (PageUptodate(page)) {
-               struct btrfs_fs_info *fs_info;
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
@@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
        u64 physical_for_dev_replace;
        u64 len;
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
+       int srcu_index;
 
        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
+
+       srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
+
        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
-       if (IS_ERR(local_root))
+       if (IS_ERR(local_root)) {
+               srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
+       }
 
        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
+       srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
index f154946..fc03aa6 100644 (file)
@@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
-                       return ERR_PTR(ret);
+                       goto reserve_fail;
        }
 again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
-       if (!h)
-               return ERR_PTR(-ENOMEM);
+       if (!h) {
+               ret = -ENOMEM;
+               goto alloc_fail;
+       }
 
        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
@@ -365,11 +367,7 @@ again:
        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);
-
-               if (type < TRANS_JOIN_NOLOCK)
-                       sb_end_intwrite(root->fs_info->sb);
-               kmem_cache_free(btrfs_trans_handle_cachep, h);
-               return ERR_PTR(ret);
+               goto join_fail;
        }
 
        cur_trans = root->fs_info->running_transaction;
@@ -410,6 +408,19 @@ got_it:
        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
+
+join_fail:
+       if (type < TRANS_JOIN_NOLOCK)
+               sb_end_intwrite(root->fs_info->sb);
+       kmem_cache_free(btrfs_trans_handle_cachep, h);
+alloc_fail:
+       if (num_bytes)
+               btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+                                       num_bytes);
+reserve_fail:
+       if (qgroup_reserved)
+               btrfs_qgroup_free(root, qgroup_reserved);
+       return ERR_PTR(ret);
 }
 
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
index 15f6efd..5cbb7f4 100644 (file)
@@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        ret = 0;
 
        /* Notify udev that device has changed */
-       btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
+       if (bdev)
+               btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
 
 error_brelse:
        brelse(bh);
index 7ff4985..911649a 100644 (file)
@@ -503,11 +503,11 @@ static ssize_t device_write(struct file *file, const char __user *buf,
 #endif
                return -EINVAL;
 
-#ifdef CONFIG_COMPAT
-       if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN)
-#else
+       /*
+        * can't compare against COMPAT/dlm_write_request32 because
+        * we don't yet know if is64bit is zero
+        */
        if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
-#endif
                return -EINVAL;
 
        kbuf = kzalloc(count + 1, GFP_NOFS);
index dd057bc..fc8dc20 100644 (file)
@@ -177,11 +177,31 @@ out_nofree:
        return mnt;
 }
 
+static int
+nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+       if (NFS_FH(dentry->d_inode)->size != 0)
+               return nfs_getattr(mnt, dentry, stat);
+       generic_fillattr(dentry->d_inode, stat);
+       return 0;
+}
+
+static int
+nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
+{
+       if (NFS_FH(dentry->d_inode)->size != 0)
+               return nfs_setattr(dentry, attr);
+       return -EACCES;
+}
+
 const struct inode_operations nfs_mountpoint_inode_operations = {
        .getattr        = nfs_getattr,
+       .setattr        = nfs_setattr,
 };
 
 const struct inode_operations nfs_referral_inode_operations = {
+       .getattr        = nfs_namespace_getattr,
+       .setattr        = nfs_namespace_setattr,
 };
 
 static void nfs_expire_automounts(struct work_struct *work)
index acc3472..2e9779b 100644 (file)
@@ -236,11 +236,10 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        error = nfs4_discover_server_trunking(clp, &old);
        if (error < 0)
                goto error;
+       nfs_put_client(clp);
        if (clp != old) {
                clp->cl_preserve_clid = true;
-               nfs_put_client(clp);
                clp = old;
-               atomic_inc(&clp->cl_count);
        }
 
        return clp;
@@ -306,7 +305,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                .clientid       = new->cl_clientid,
                .confirm        = new->cl_confirm,
        };
-       int status;
+       int status = -NFS4ERR_STALE_CLIENTID;
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
@@ -332,40 +331,33 @@ int nfs40_walk_client_list(struct nfs_client *new,
 
                if (prev)
                        nfs_put_client(prev);
+               prev = pos;
 
                status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
-               if (status == 0) {
+               switch (status) {
+               case -NFS4ERR_STALE_CLIENTID:
+                       break;
+               case 0:
                        nfs4_swap_callback_idents(pos, new);
 
-                       nfs_put_client(pos);
+                       prev = NULL;
                        *result = pos;
                        dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
                                __func__, pos, atomic_read(&pos->cl_count));
-                       return 0;
-               }
-               if (status != -NFS4ERR_STALE_CLIENTID) {
-                       nfs_put_client(pos);
-                       dprintk("NFS: <-- %s status = %d, no result\n",
-                               __func__, status);
-                       return status;
+               default:
+                       goto out;
                }
 
                spin_lock(&nn->nfs_client_lock);
-               prev = pos;
        }
+       spin_unlock(&nn->nfs_client_lock);
 
-       /*
-        * No matching nfs_client found.  This should be impossible,
-        * because the new nfs_client has already been added to
-        * nfs_client_list by nfs_get_client().
-        *
-        * Don't BUG(), since the caller is holding a mutex.
-        */
+       /* No match found. The server lost our clientid */
+out:
        if (prev)
                nfs_put_client(prev);
-       spin_unlock(&nn->nfs_client_lock);
-       pr_err("NFS: %s Error: no matching nfs_client found\n", __func__);
-       return -NFS4ERR_STALE_CLIENTID;
+       dprintk("NFS: <-- %s status = %d\n", __func__, status);
+       return status;
 }
 
 #ifdef CONFIG_NFS_V4_1
@@ -432,7 +424,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
 {
        struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
        struct nfs_client *pos, *n, *prev = NULL;
-       int error;
+       int status = -NFS4ERR_STALE_CLIENTID;
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
@@ -448,14 +440,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
                                nfs_put_client(prev);
                        prev = pos;
 
-                       error = nfs_wait_client_init_complete(pos);
-                       if (error < 0) {
+                       nfs4_schedule_lease_recovery(pos);
+                       status = nfs_wait_client_init_complete(pos);
+                       if (status < 0) {
                                nfs_put_client(pos);
                                spin_lock(&nn->nfs_client_lock);
                                continue;
                        }
-
+                       status = pos->cl_cons_state;
                        spin_lock(&nn->nfs_client_lock);
+                       if (status < 0)
+                               continue;
                }
 
                if (pos->rpc_ops != new->rpc_ops)
@@ -473,6 +468,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (!nfs4_match_serverowners(pos, new))
                        continue;
 
+               atomic_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
                dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
                        __func__, pos, atomic_read(&pos->cl_count));
@@ -481,16 +477,10 @@ int nfs41_walk_client_list(struct nfs_client *new,
                return 0;
        }
 
-       /*
-        * No matching nfs_client found.  This should be impossible,
-        * because the new nfs_client has already been added to
-        * nfs_client_list by nfs_get_client().
-        *
-        * Don't BUG(), since the caller is holding a mutex.
-        */
+       /* No matching nfs_client found. */
        spin_unlock(&nn->nfs_client_lock);
-       pr_err("NFS: %s Error: no matching nfs_client found\n", __func__);
-       return -NFS4ERR_STALE_CLIENTID;
+       dprintk("NFS: <-- %s status = %d\n", __func__, status);
+       return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
index 9448c57..e61f68d 100644 (file)
@@ -136,16 +136,11 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
        clp->cl_confirm = clid.confirm;
 
        status = nfs40_walk_client_list(clp, result, cred);
-       switch (status) {
-       case -NFS4ERR_STALE_CLIENTID:
-               set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-       case 0:
+       if (status == 0) {
                /* Sustain the lease, even if it's empty.  If the clientid4
                 * goes stale it's of no use for trunking discovery. */
                nfs4_schedule_state_renewal(*result);
-               break;
        }
-
 out:
        return status;
 }
@@ -1863,6 +1858,7 @@ again:
        case -ETIMEDOUT:
        case -EAGAIN:
                ssleep(1);
+       case -NFS4ERR_STALE_CLIENTID:
                dprintk("NFS: %s after status %d, retrying\n",
                        __func__, status);
                goto again;
@@ -2022,8 +2018,18 @@ static int nfs4_reset_session(struct nfs_client *clp)
        nfs4_begin_drain_session(clp);
        cred = nfs4_get_exchange_id_cred(clp);
        status = nfs4_proc_destroy_session(clp->cl_session, cred);
-       if (status && status != -NFS4ERR_BADSESSION &&
-           status != -NFS4ERR_DEADSESSION) {
+       switch (status) {
+       case 0:
+       case -NFS4ERR_BADSESSION:
+       case -NFS4ERR_DEADSESSION:
+               break;
+       case -NFS4ERR_BACK_CHAN_BUSY:
+       case -NFS4ERR_DELAY:
+               set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+               status = 0;
+               ssleep(1);
+               goto out;
+       default:
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }
index 2e7e8c8..b056b16 100644 (file)
@@ -2589,27 +2589,23 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
        struct nfs_server *server;
        struct dentry *mntroot = ERR_PTR(-ENOMEM);
        struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod;
-       int error;
 
-       dprintk("--> nfs_xdev_mount_common()\n");
+       dprintk("--> nfs_xdev_mount()\n");
 
        mount_info.mntfh = mount_info.cloned->fh;
 
        /* create a new volume representation */
        server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-       if (IS_ERR(server)) {
-               error = PTR_ERR(server);
-               goto out_err;
-       }
 
-       mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod);
-       dprintk("<-- nfs_xdev_mount_common() = 0\n");
-out:
-       return mntroot;
+       if (IS_ERR(server))
+               mntroot = ERR_CAST(server);
+       else
+               mntroot = nfs_fs_mount_common(server, flags,
+                               dev_name, &mount_info, nfs_mod);
 
-out_err:
-       dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
-       goto out;
+       dprintk("<-- nfs_xdev_mount() = %ld\n",
+                       IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L);
+       return mntroot;
 }
 
 #if IS_ENABLED(CONFIG_NFS_V4)
index fdb1807..f385935 100644 (file)
@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
        if (ret < 0)
                printk(KERN_ERR "NILFS: GC failed during preparation: "
                        "cannot read source blocks: err=%d\n", ret);
-       else
+       else {
+               if (nilfs_sb_need_update(nilfs))
+                       set_nilfs_discontinued(nilfs);
                ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
+       }
 
        nilfs_remove_all_gcinodes(nilfs);
        clear_nilfs_gc_running(nilfs);
index fe72cd0..3131a03 100644 (file)
@@ -177,20 +177,6 @@ const struct file_operations proc_net_operations = {
        .readdir        = proc_tgid_net_readdir,
 };
 
-
-struct proc_dir_entry *proc_net_fops_create(struct net *net,
-       const char *name, umode_t mode, const struct file_operations *fops)
-{
-       return proc_create(name, mode, net->proc_net, fops);
-}
-EXPORT_SYMBOL_GPL(proc_net_fops_create);
-
-void proc_net_remove(struct net *net, const char *name)
-{
-       remove_proc_entry(name, net->proc_net);
-}
-EXPORT_SYMBOL_GPL(proc_net_remove);
-
 static __net_init int proc_net_ns_init(struct net *net)
 {
        struct proc_dir_entry *netd, *net_statd;
index 4111a40..5f707e5 100644 (file)
@@ -86,11 +86,11 @@ xfs_destroy_ioend(
        }
 
        if (ioend->io_iocb) {
+               inode_dio_done(ioend->io_inode);
                if (ioend->io_isasync) {
                        aio_complete(ioend->io_iocb, ioend->io_error ?
                                        ioend->io_error : ioend->io_result, 0);
                }
-               inode_dio_done(ioend->io_inode);
        }
 
        mempool_free(ioend, xfs_ioend_pool);
index 0e92d12..cdb2d33 100644 (file)
@@ -4680,9 +4680,6 @@ __xfs_bmapi_allocate(
                        return error;
        }
 
-       if (bma->flags & XFS_BMAPI_STACK_SWITCH)
-               bma->stack_switch = 1;
-
        error = xfs_bmap_alloc(bma);
        if (error)
                return error;
@@ -4956,6 +4953,9 @@ xfs_bmapi_write(
        bma.flist = flist;
        bma.firstblock = firstblock;
 
+       if (flags & XFS_BMAPI_STACK_SWITCH)
+               bma.stack_switch = 1;
+
        while (bno < end && n < *nmap) {
                inhole = eof || bma.got.br_startoff > bno;
                wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
index 56d1614..fbbb9eb 100644 (file)
@@ -487,6 +487,7 @@ _xfs_buf_find(
        struct rb_node          *parent;
        xfs_buf_t               *bp;
        xfs_daddr_t             blkno = map[0].bm_bn;
+       xfs_daddr_t             eofs;
        int                     numblks = 0;
        int                     i;
 
@@ -498,6 +499,23 @@ _xfs_buf_find(
        ASSERT(!(numbytes < (1 << btp->bt_sshift)));
        ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
 
+       /*
+        * Corrupted block numbers can get through to here, unfortunately, so we
+        * have to check that the buffer falls within the filesystem bounds.
+        */
+       eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
+       if (blkno >= eofs) {
+               /*
+                * XXX (dgc): we should really be returning EFSCORRUPTED here,
+                * but none of the higher level infrastructure supports
+                * returning a specific error on buffer lookup failures.
+                */
+               xfs_alert(btp->bt_mount,
+                         "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
+                         __func__, blkno, eofs);
+               return NULL;
+       }
+
        /* get tree root */
        pag = xfs_perag_get(btp->bt_mount,
                                xfs_daddr_to_agno(btp->bt_mount, blkno));
@@ -1487,6 +1505,8 @@ restart:
        while (!list_empty(&btp->bt_lru)) {
                bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
                if (atomic_read(&bp->b_hold) > 1) {
+                       trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+                       list_move_tail(&bp->b_lru, &btp->bt_lru);
                        spin_unlock(&btp->bt_lru_lock);
                        delay(100);
                        goto restart;
index 77b0975..3f9949f 100644 (file)
@@ -652,7 +652,10 @@ xfs_buf_item_unlock(
 
        /*
         * If the buf item isn't tracking any data, free it, otherwise drop the
-        * reference we hold to it.
+        * reference we hold to it. If we are aborting the transaction, this may
+        * be the only reference to the buf item, so we free it anyway
+        * regardless of whether it is dirty or not. A dirty abort implies a
+        * shutdown, anyway.
         */
        clean = 1;
        for (i = 0; i < bip->bli_format_count; i++) {
@@ -664,7 +667,12 @@ xfs_buf_item_unlock(
        }
        if (clean)
                xfs_buf_item_relse(bp);
-       else
+       else if (aborted) {
+               if (atomic_dec_and_test(&bip->bli_refcount)) {
+                       ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
+                       xfs_buf_item_relse(bp);
+               }
+       } else
                atomic_dec(&bip->bli_refcount);
 
        if (!hold)
index d0e9c74..a8bd26b 100644 (file)
@@ -246,10 +246,10 @@ xfs_swap_extents(
                goto out_unlock;
        }
 
-       error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
+       error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
        if (error)
                goto out_unlock;
-       truncate_pagecache_range(VFS_I(ip), 0, -1);
+       truncate_pagecache_range(VFS_I(tip), 0, -1);
 
        /* Verify O_DIRECT for ftmp */
        if (VN_CACHED(VFS_I(tip)) != 0) {
index add06b4..364818e 100644 (file)
@@ -351,6 +351,15 @@ xfs_iomap_prealloc_size(
                }
                if (shift)
                        alloc_blocks >>= shift;
+
+               /*
+                * If we are still trying to allocate more space than is
+                * available, squash the prealloc hard. This can happen if we
+                * have a large file on a small filesystem and the above
+                * lowspace thresholds are smaller than MAXEXTLEN.
+                */
+               while (alloc_blocks >= freesp)
+                       alloc_blocks >>= 4;
        }
 
        if (alloc_blocks < mp->m_writeio_blocks)
index da50846..7d6df7c 100644 (file)
@@ -658,7 +658,7 @@ xfs_sb_quiet_read_verify(
                return;
        }
        /* quietly fail */
-       xfs_buf_ioerror(bp, EFSCORRUPTED);
+       xfs_buf_ioerror(bp, EWRONGFS);
 }
 
 static void
index 2e137d4..16a8129 100644 (file)
@@ -341,6 +341,7 @@ DEFINE_BUF_EVENT(xfs_buf_item_relse);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
 DEFINE_BUF_EVENT(xfs_buf_error_relse);
+DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
 DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
 DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
 
index ee332fa..1d002b5 100644 (file)
@@ -27,7 +27,7 @@
 #define   BCMA_CC_FLASHT_NONE          0x00000000      /* No flash */
 #define   BCMA_CC_FLASHT_STSER         0x00000100      /* ST serial flash */
 #define   BCMA_CC_FLASHT_ATSER         0x00000200      /* Atmel serial flash */
-#define   BCMA_CC_FLASHT_NFLASH                0x00000200      /* NAND flash */
+#define   BCMA_CC_FLASHT_NAND          0x00000300      /* NAND flash */
 #define          BCMA_CC_FLASHT_PARA           0x00000700      /* Parallel flash */
 #define  BCMA_CC_CAP_PLLT              0x00038000      /* PLL Type */
 #define   BCMA_PLLTYPE_NONE            0x00000000
index 0d1ea29..fb61f3f 100644 (file)
@@ -42,13 +42,18 @@ struct bcma_drv_mips {
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
 extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
+
+extern unsigned int bcma_core_irq(struct bcma_device *core);
 #else
 static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
 static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
+
+static inline unsigned int bcma_core_irq(struct bcma_device *core)
+{
+       return 0;
+}
 #endif
 
 extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
 
-extern unsigned int bcma_core_irq(struct bcma_device *core);
-
 #endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
index 8b84916..7a9498a 100644 (file)
@@ -618,18 +618,30 @@ extern int __init efi_setup_pcdp_console(char *);
 #endif
 
 /*
- * We play games with efi_enabled so that the compiler will, if possible, remove
- * EFI-related code altogether.
+ * We play games with efi_enabled so that the compiler will, if
+ * possible, remove EFI-related code altogether.
  */
+#define EFI_BOOT               0       /* Were we booted from EFI? */
+#define EFI_SYSTEM_TABLES      1       /* Can we use EFI system tables? */
+#define EFI_CONFIG_TABLES      2       /* Can we use EFI config tables? */
+#define EFI_RUNTIME_SERVICES   3       /* Can we use runtime services? */
+#define EFI_MEMMAP             4       /* Can we use EFI memory map? */
+#define EFI_64BIT              5       /* Is the firmware 64-bit? */
+
 #ifdef CONFIG_EFI
 # ifdef CONFIG_X86
-   extern int efi_enabled;
-   extern bool efi_64bit;
+extern int efi_enabled(int facility);
 # else
-#  define efi_enabled 1
+static inline int efi_enabled(int facility)
+{
+       return 1;
+}
 # endif
 #else
-# define efi_enabled 0
+static inline int efi_enabled(int facility)
+{
+       return 0;
+}
 #endif
 
 /*
index ccf9ee1..7e24fe0 100644 (file)
 /* Mesh Control 802.11s */
 #define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT  0x0100
 
+/* Mesh Power Save Level */
+#define IEEE80211_QOS_CTL_MESH_PS_LEVEL                0x0200
+/* Mesh Receiver Service Period Initiated */
+#define IEEE80211_QOS_CTL_RSPI                 0x0400
+
 /* U-APSD queue for WMM IEs sent by AP */
 #define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD      (1<<7)
 #define IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK 0x0f
@@ -675,11 +680,14 @@ struct ieee80211_meshconf_ie {
  * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
  * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
  *     is ongoing
+ * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
+ *     neighbors in deep sleep mode
  */
 enum mesh_config_capab_flags {
        IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS          = 0x01,
        IEEE80211_MESHCONF_CAPAB_FORWARDING             = 0x08,
        IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING         = 0x20,
+       IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL       = 0x40,
 };
 
 /**
@@ -706,6 +714,30 @@ enum ieee80211_ht_chanwidth_values {
        IEEE80211_HT_CHANWIDTH_ANY = 1,
 };
 
+/**
+ * enum ieee80211_opmode_bits - VHT operating mode field bits
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
+ *     (the NSS value is the value of this field + 1)
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
+ *     using a beamforming steering matrix
+ */
+enum ieee80211_vht_opmode_bits {
+       IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK   = 3,
+       IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ  = 0,
+       IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ  = 1,
+       IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ  = 2,
+       IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+       IEEE80211_OPMODE_NOTIF_RX_NSS_MASK      = 0x70,
+       IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT     = 4,
+       IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF   = 0x80,
+};
+
 #define WLAN_SA_QUERY_TR_ID_LEN 2
 
 struct ieee80211_mgmt {
@@ -836,6 +868,10 @@ struct ieee80211_mgmt {
                                        __le16 capability;
                                        u8 variable[0];
                                } __packed tdls_discover_resp;
+                               struct {
+                                       u8 action_code;
+                                       u8 operating_mode;
+                               } __packed vht_opmode_notif;
                        } u;
                } __packed action;
        } u;
@@ -1265,6 +1301,7 @@ struct ieee80211_vht_operation {
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454                        0x00000002
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ               0x00000004
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ      0x00000008
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK                 0x0000000C
 #define IEEE80211_VHT_CAP_RXLDPC                               0x00000010
 #define IEEE80211_VHT_CAP_SHORT_GI_80                          0x00000020
 #define IEEE80211_VHT_CAP_SHORT_GI_160                         0x00000040
@@ -1590,6 +1627,7 @@ enum ieee80211_eid {
 
        WLAN_EID_VHT_CAPABILITY = 191,
        WLAN_EID_VHT_OPERATION = 192,
+       WLAN_EID_OPMODE_NOTIF = 199,
 
        /* 802.11ad */
        WLAN_EID_NON_TX_BSSID_CAP =  83,
@@ -1644,6 +1682,7 @@ enum ieee80211_category {
        WLAN_CATEGORY_WMM = 17,
        WLAN_CATEGORY_FST = 18,
        WLAN_CATEGORY_UNPROT_DMG = 20,
+       WLAN_CATEGORY_VHT = 21,
        WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
        WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
 };
@@ -1669,6 +1708,13 @@ enum ieee80211_ht_actioncode {
        WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
 };
 
+/* VHT action codes */
+enum ieee80211_vht_actioncode {
+       WLAN_VHT_ACTION_COMPRESSED_BF = 0,
+       WLAN_VHT_ACTION_GROUPID_MGMT = 1,
+       WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
+};
+
 /* Self Protected Action codes */
 enum ieee80211_self_protected_actioncode {
        WLAN_SP_RESERVED = 0,
@@ -1730,6 +1776,8 @@ enum ieee80211_tdls_actioncode {
 #define WLAN_EXT_CAPA5_TDLS_ENABLED    BIT(5)
 #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
 
+#define WLAN_EXT_CAPA8_OPMODE_NOTIF    BIT(6)
+
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE  0x2
 
@@ -1898,7 +1946,10 @@ enum ieee80211_sa_query_action {
 /* AKM suite selectors */
 #define WLAN_AKM_SUITE_8021X           0x000FAC01
 #define WLAN_AKM_SUITE_PSK             0x000FAC02
-#define WLAN_AKM_SUITE_SAE                     0x000FAC08
+#define WLAN_AKM_SUITE_8021X_SHA256    0x000FAC05
+#define WLAN_AKM_SUITE_PSK_SHA256      0x000FAC06
+#define WLAN_AKM_SUITE_TDLS            0x000FAC07
+#define WLAN_AKM_SUITE_SAE             0x000FAC08
 #define WLAN_AKM_SUITE_FT_OVER_SAE     0x000FAC09
 
 #define WLAN_MAX_KEY_LEN               32
@@ -2103,7 +2154,7 @@ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
  * @tim_len: length of the TIM IE
  * @aid: the AID to look for
  */
-static inline bool ieee80211_check_tim(struct ieee80211_tim_ie *tim,
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
                                       u8 tim_len, u16 aid)
 {
        u8 mask;
index f65e8d2..84dde1d 100644 (file)
@@ -52,6 +52,9 @@ struct macvlan_pcpu_stats {
  */
 #define MAX_MACVTAP_QUEUES     (NR_CPUS < 16 ? NR_CPUS : 16)
 
+#define MACVLAN_MC_FILTER_BITS 8
+#define MACVLAN_MC_FILTER_SZ   (1 << MACVLAN_MC_FILTER_BITS)
+
 struct macvlan_dev {
        struct net_device       *dev;
        struct list_head        list;
@@ -59,6 +62,9 @@ struct macvlan_dev {
        struct macvlan_port     *port;
        struct net_device       *lowerdev;
        struct macvlan_pcpu_stats __percpu *pcpu_stats;
+
+       DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
+
        enum macvlan_mode       mode;
        u16                     flags;
        int (*receive)(struct sk_buff *skb);
index 0245def..4648d80 100644 (file)
@@ -186,6 +186,7 @@ struct team {
 
        const struct team_mode *mode;
        struct team_mode_ops ops;
+       bool user_carrier_enabled;
        bool queue_override_enabled;
        struct list_head *qom_lists; /* array of queue override mapping lists */
        long mode_priv[TEAM_MODE_PRIV_LONGS];
index a16e193..34edf1f 100644 (file)
@@ -36,4 +36,13 @@ extern const struct in6_addr in6addr_linklocal_allnodes;
 extern const struct in6_addr in6addr_linklocal_allrouters;
 #define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \
                { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
+extern const struct in6_addr in6addr_interfacelocal_allnodes;
+#define IN6ADDR_INTERFACELOCAL_ALLNODES_INIT \
+               { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+extern const struct in6_addr in6addr_interfacelocal_allrouters;
+#define IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT \
+               { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
+extern const struct in6_addr in6addr_sitelocal_allrouters;
+#define IN6ADDR_SITELOCAL_ALLROUTERS_INIT \
+               { { { 0xff,5,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
 #endif
index a5199f6..d0ab98f 100644 (file)
@@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list)
             (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
 
 /**
+ * llist_for_each_entry_safe - iterate safely against remove over some entries
+ * of lock-less list of given type.
+ * @pos:       the type * to use as a loop cursor.
+ * @n:         another type * to use as a temporary storage.
+ * @node:      the fist entry of deleted list entries.
+ * @member:    the name of the llist_node with the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head. This variant allows removal of entries
+ * as we iterate.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry.  If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member)                \
+       for ((pos) = llist_entry((node), typeof(*(pos)), member),       \
+            (n) = (pos)->member.next;                                  \
+            &(pos)->member != NULL;                                    \
+            (pos) = llist_entry(n, typeof(*(pos)), member),            \
+            (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
+
+/**
  * llist_empty - tests whether a lock-less list is empty
  * @head:      the list to test
  *
index 0108a56..28bd5fa 100644 (file)
@@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size;
  * the slab_mutex must be held when looping through those caches
  */
 #define for_each_memcg_cache_index(_idx)       \
-       for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++)
+       for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
 
 static inline bool memcg_kmem_enabled(void)
 {
index 1883e8e..6d48fce 100644 (file)
@@ -956,9 +956,8 @@ int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mo
 
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
-int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
-void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
                          u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
index bc823c4..deca874 100644 (file)
@@ -151,7 +151,7 @@ struct mmu_notifier_ops {
  * Therefore notifier chains can only be traversed when either
  *
  * 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
+ * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).
  * 3. No other concurrent thread can access the list (release)
  */
 struct mmu_notifier {
index 5ac3212..3dd3934 100644 (file)
@@ -41,7 +41,7 @@ enum {
        NETIF_F_TSO_ECN_BIT,            /* ... TCP ECN support */
        NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
        NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
-       NETIF_F_GSO_RESERVED1,          /* ... free (fill GSO_MASK to 8 bits) */
+       NETIF_F_GSO_GRE_BIT,            /* ... GRE with TSO */
        /**/NETIF_F_GSO_LAST,           /* [can't be last bit, see GSO_MASK] */
        NETIF_F_GSO_RESERVED2           /* ... free (fill GSO_MASK to 8 bits) */
                = NETIF_F_GSO_LAST,
@@ -102,6 +102,7 @@ enum {
 #define NETIF_F_VLAN_CHALLENGED        __NETIF_F(VLAN_CHALLENGED)
 #define NETIF_F_RXFCS          __NETIF_F(RXFCS)
 #define NETIF_F_RXALL          __NETIF_F(RXALL)
+#define NETIF_F_GRE_GSO                __NETIF_F(GSO_GRE)
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
index 85b0949..920361b 100644 (file)
@@ -884,7 +884,8 @@ struct netdev_fcoe_hbainfo {
  *                   struct net_device *dev,
  *                   const unsigned char *addr, u16 flags)
  *     Adds an FDB entry to dev for addr.
- * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
+ * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
+ *                   struct net_device *dev,
  *                   const unsigned char *addr)
  *     Deletes the FDB entry from dev coresponding to addr.
  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
@@ -1008,6 +1009,7 @@ struct net_device_ops {
                                               const unsigned char *addr,
                                               u16 flags);
        int                     (*ndo_fdb_del)(struct ndmsg *ndm,
+                                              struct nlattr *tb[],
                                               struct net_device *dev,
                                               const unsigned char *addr);
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
@@ -1019,7 +1021,10 @@ struct net_device_ops {
                                                      struct nlmsghdr *nlh);
        int                     (*ndo_bridge_getlink)(struct sk_buff *skb,
                                                      u32 pid, u32 seq,
-                                                     struct net_device *dev);
+                                                     struct net_device *dev,
+                                                     u32 filter_mask);
+       int                     (*ndo_bridge_dellink)(struct net_device *dev,
+                                                     struct nlmsghdr *nlh);
        int                     (*ndo_change_carrier)(struct net_device *dev,
                                                      bool new_carrier);
 };
@@ -1290,6 +1295,8 @@ struct net_device {
        };
        /* GARP */
        struct garp_port __rcu  *garp_port;
+       /* MRP */
+       struct mrp_port __rcu   *mrp_port;
 
        /* class/net/name entry */
        struct device           dev;
@@ -2662,8 +2669,17 @@ extern int netdev_master_upper_dev_link(struct net_device *dev,
 extern void netdev_upper_dev_unlink(struct net_device *dev,
                                    struct net_device *upper_dev);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
-       netdev_features_t features);
+extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+       netdev_features_t features, bool tx_path);
+extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+                                         netdev_features_t features);
+
+static inline
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
+{
+       return __skb_gso_segment(skb, features, true);
+}
+
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
index f54c3bb..9d7d8c6 100644 (file)
@@ -32,14 +32,15 @@ struct netpoll {
        u8 remote_mac[ETH_ALEN];
 
        struct list_head rx; /* rx_np list element */
-       struct rcu_head rcu;
+       struct work_struct cleanup_work;
 };
 
 struct netpoll_info {
        atomic_t refcnt;
 
-       int rx_flags;
+       unsigned long rx_flags;
        spinlock_t rx_lock;
+       struct mutex dev_lock;
        struct list_head rx_np; /* netpolls that registered an rx_hook */
 
        struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
@@ -51,6 +52,14 @@ struct netpoll_info {
        struct rcu_head rcu;
 };
 
+#ifdef CONFIG_NETPOLL
+extern int netpoll_rx_disable(struct net_device *dev);
+extern void netpoll_rx_enable(struct net_device *dev);
+#else
+static inline int netpoll_rx_disable(struct net_device *dev) { return 0; }
+static inline void netpoll_rx_enable(struct net_device *dev) { return; }
+#endif
+
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
@@ -59,7 +68,7 @@ int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_rcu(struct netpoll *np);
+void __netpoll_free_async(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
index 0eb6579..907e7e5 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5754M     0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M     0x1673
 #define PCI_DEVICE_ID_TIGON3_5756      0x1674
+#define PCI_DEVICE_ID_TIGON3_5750      0x1676
 #define PCI_DEVICE_ID_TIGON3_5751      0x1677
 #define PCI_DEVICE_ID_TIGON3_5715      0x1678
 #define PCI_DEVICE_ID_TIGON3_5715S     0x1679
index 24368a2..798fb80 100644 (file)
@@ -21,6 +21,8 @@ struct cpsw_slave_data {
        char            phy_id[MII_BUS_ID_SIZE];
        int             phy_if;
        u8              mac_addr[ETH_ALEN];
+       u16             dual_emac_res_vlan;     /* Reserved VLAN for DualEMAC */
+
 };
 
 struct cpsw_platform_data {
@@ -35,6 +37,8 @@ struct cpsw_platform_data {
        u32     bd_ram_size;  /*buffer descriptor ram size */
        u32     rx_descs;       /* Number of Rx Descriptios */
        u32     mac_control;    /* Mac control register */
+       u16     default_vlan;   /* Def VLAN for ALE lookup in VLAN aware mode*/
+       bool    dual_emac;      /* Enable Dual EMAC mode */
 };
 
 #endif /* __CPSW_H__ */
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
new file mode 100644 (file)
index 0000000..cfda59b
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Driver include for the PN544 NFC chip.
+ *
+ * Copyright (C) 2011 Tieto Poland
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _MICROREAD_H
+#define _MICROREAD_H
+
+#include <linux/i2c.h>
+
+#define MICROREAD_DRIVER_NAME  "microread"
+
+/* board config platform data for microread */
+struct microread_nfc_platform_data {
+       unsigned int rst_gpio;
+       unsigned int irq_gpio;
+       unsigned int ioh_gpio;
+};
+
+#endif /* _MICROREAD_H */
index 32676b3..319f694 100644 (file)
@@ -171,9 +171,6 @@ static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
        return res;
 }
  
-extern struct proc_dir_entry *proc_net_fops_create(struct net *net,
-       const char *name, umode_t mode, const struct file_operations *fops);
-extern void proc_net_remove(struct net *net, const char *name);
 extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
        struct proc_dir_entry *parent);
 
@@ -184,9 +181,6 @@ extern int proc_alloc_inum(unsigned int *pino);
 extern void proc_free_inum(unsigned int inum);
 #else
 
-#define proc_net_fops_create(net, name, mode, fops)  ({ (void)(mode), NULL; })
-static inline void proc_net_remove(struct net *net, const char *name) {}
-
 static inline void proc_flush_task(struct task_struct *task)
 {
 }
index 0259b71..821c7f4 100644 (file)
@@ -230,6 +230,13 @@ enum {
 
        /* generate wifi status information (where possible) */
        SKBTX_WIFI_STATUS = 1 << 4,
+
+       /* This indicates at least one fragment might be overwritten
+        * (as in vmsplice(), sendfile() ...)
+        * If we need to compute a TX checksum, we'll need to copy
+        * all frags to avoid possible bad checksum
+        */
+       SKBTX_SHARED_FRAG = 1 << 5,
 };
 
 /*
@@ -308,12 +315,7 @@ enum {
 
        SKB_GSO_FCOE = 1 << 5,
 
-       /* This indicates at least one fragment might be overwritten
-        * (as in vmsplice(), sendfile() ...)
-        * If we need to compute a TX checksum, we'll need to copy
-        * all frags to avoid possible bad checksum
-        */
-       SKB_GSO_SHARED_FRAG = 1 << 6,
+       SKB_GSO_GRE = 1 << 6,
 };
 
 #if BITS_PER_LONG > 32
@@ -804,6 +806,16 @@ static inline int skb_cloned(const struct sk_buff *skb)
               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
 }
 
+static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
+{
+       might_sleep_if(pri & __GFP_WAIT);
+
+       if (skb_cloned(skb))
+               return pskb_expand_head(skb, 0, 0, pri);
+
+       return 0;
+}
+
 /**
  *     skb_header_cloned - is the header a clone
  *     @skb: buffer to check
@@ -1832,6 +1844,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
                kfree_skb(skb);
 }
 
+#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
+#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
+#define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
+
 extern void *netdev_alloc_frag(unsigned int fragsz);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
@@ -2216,7 +2232,8 @@ static inline int skb_linearize(struct sk_buff *skb)
  */
 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
 {
-       return skb_shinfo(skb)->gso_type & SKB_GSO_SHARED_FRAG;
+       return skb_is_nonlinear(skb) &&
+              skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
 }
 
 /**
@@ -2717,6 +2734,21 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 }
 #endif
 
+/* Keeps track of mac header offset relative to skb->head.
+ * It is useful for TSO of Tunneling protocol. e.g. GRE.
+ * For non-tunnel skb it points to skb_mac_header() and for
+ * tunnel skb it points to outer mac header. */
+struct skb_gso_cb {
+       int mac_offset;
+};
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
+
+static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+{
+       return (skb_mac_header(inner_skb) - inner_skb->head) -
+               SKB_GSO_CB(inner_skb)->mac_offset;
+}
+
 static inline bool skb_is_gso(const struct sk_buff *skb)
 {
        return skb_shinfo(skb)->gso_size;
index 9a546ff..2b9f74b 100644 (file)
@@ -178,7 +178,8 @@ struct ucred {
 #define AF_CAIF                37      /* CAIF sockets                 */
 #define AF_ALG         38      /* Algorithm sockets            */
 #define AF_NFC         39      /* NFC sockets                  */
-#define AF_MAX         40      /* For now.. */
+#define AF_VSOCK       40      /* vSockets                     */
+#define AF_MAX         41      /* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC      AF_UNSPEC
@@ -221,6 +222,7 @@ struct ucred {
 #define PF_CAIF                AF_CAIF
 #define PF_ALG         AF_ALG
 #define PF_NFC         AF_NFC
+#define PF_VSOCK       AF_VSOCK
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
index 6b05dcd..86a12b0 100644 (file)
@@ -97,21 +97,16 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
        return 0;
 }
 
-#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
 /* Get the device MAC address */
-static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
-{
-       char buf[20];
-       if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0)
-               return;
-       nvram_parse_macaddr(buf, macaddr);
-}
-#else
-static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
 {
+       struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+       if (!dev)
+               return -ENODEV;
+
+       memcpy(macaddr, dev->dev->bus->sprom.et0mac, 6);
+       return 0;
 }
-#endif
 
 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
                                          struct pci_dev *pdev);
@@ -175,6 +170,10 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
 {
        return 0;
 }
+static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+{
+       return -ENODEV;
+}
 
 #endif /* CONFIG_SSB_DRIVER_GIGE */
 #endif /* LINUX_SSB_DRIVER_GIGE_H_ */
index 07a9c7a..afe79d4 100644 (file)
@@ -45,6 +45,11 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
 {
 }
 
+static inline unsigned int ssb_mips_irq(struct ssb_device *dev)
+{
+       return 0;
+}
+
 #endif /* CONFIG_SSB_DRIVER_MIPS */
 
 #endif /* LINUX_SSB_MIPSCORE_H_ */
index 4e1d228..f28408c 100644 (file)
@@ -162,6 +162,8 @@ struct tcp_sock {
        u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
        u32     lsndtime;       /* timestamp of last sent data packet (for restart window) */
 
+       u32     tsoffset;       /* timestamp offset */
+
        struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
        unsigned long   tsq_flags;
 
@@ -246,7 +248,6 @@ struct tcp_sock {
        u32     sacked_out;     /* SACK'd packets                       */
        u32     fackets_out;    /* FACK'd packets                       */
        u32     tso_deferred;
-       u32     bytes_acked;    /* Appropriate Byte Counting - RFC3465 */
 
        /* from STCP, retrans queue hinting */
        struct sk_buff* lost_skb_hint;
@@ -354,6 +355,7 @@ struct tcp_timewait_sock {
        u32                       tw_rcv_nxt;
        u32                       tw_snd_nxt;
        u32                       tw_rcv_wnd;
+       u32                       tw_ts_offset;
        u32                       tw_ts_recent;
        long                      tw_ts_recent_stamp;
 #ifdef CONFIG_TCP_MD5SIG
index 689b14b..4d22d0f 100644 (file)
@@ -357,6 +357,8 @@ struct usb_bus {
        int bandwidth_int_reqs;         /* number of Interrupt requests */
        int bandwidth_isoc_reqs;        /* number of Isoc. requests */
 
+       unsigned resuming_ports;        /* bit array: resuming root-hub ports */
+
 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
        struct mon_bus *mon_bus;        /* non-null when associated */
        int monitored;                  /* non-zero when monitored */
index 608050b..0a78df5 100644 (file)
@@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
 extern void usb_wakeup_notification(struct usb_device *hdev,
                unsigned int portnum);
 
+extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
+extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
+
 /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
 #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
 #define        usb_dotoggle(dev, ep, out)  ((dev)->toggle[out] ^= (1 << (ep)))
index 5de7a22..0e5ac93 100644 (file)
@@ -33,6 +33,7 @@ struct usbnet {
        wait_queue_head_t       *wait;
        struct mutex            phy_mutex;
        unsigned char           suspend_count;
+       unsigned char           pkt_cnt, pkt_err;
 
        /* i/o info: pipes etc */
        unsigned                in, out;
@@ -70,6 +71,7 @@ struct usbnet {
 #              define EVENT_DEV_OPEN   7
 #              define EVENT_DEVICE_REPORT_IDLE 8
 #              define EVENT_NO_RUNTIME_PM      9
+#              define EVENT_RX_KILL    10
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -100,7 +102,6 @@ struct driver_info {
 #define FLAG_LINK_INTR 0x0800          /* updates link (carrier) status */
 
 #define FLAG_POINTTOPOINT 0x1000       /* possibly use "usb%d" names */
-#define FLAG_NOARP     0x2000          /* device can't do ARP */
 
 /*
  * Indicates to usbnet, that USB driver accumulates multiple IP packets.
@@ -108,6 +109,7 @@ struct driver_info {
  */
 #define FLAG_MULTI_PACKET      0x2000
 #define FLAG_RX_ASSEMBLE       0x4000  /* rx packets may span >1 frames */
+#define FLAG_NOARP             0x8000  /* device can't do ARP */
 
        /* init device ... can sleep, or cause probe() failure */
        int     (*bind)(struct usbnet *, struct usb_interface *);
index 0d63731..a54fe82 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef _LINUX_WL12XX_H
 #define _LINUX_WL12XX_H
 
+#include <linux/err.h>
+
 /* Reference clock values */
 enum {
        WL12XX_REFCLOCK_19      = 0, /* 19.2 MHz */
@@ -55,17 +57,17 @@ struct wl12xx_platform_data {
        int board_tcxo_clock;
        unsigned long platform_quirks;
        bool pwr_in_suspend;
-
-       struct wl1271_if_operations *ops;
 };
 
 /* Platform does not support level trigger interrupts */
 #define WL12XX_PLATFORM_QUIRK_EDGE_IRQ BIT(0)
 
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
 
 int wl12xx_set_platform_data(const struct wl12xx_platform_data *data);
 
+struct wl12xx_platform_data *wl12xx_get_platform_data(void);
+
 #else
 
 static inline
@@ -74,8 +76,12 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
        return -ENOSYS;
 }
 
-#endif
+static inline
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+{
+       return ERR_PTR(-ENODATA);
+}
 
-struct wl12xx_platform_data *wl12xx_get_platform_data(void);
+#endif
 
 #endif
index 112c25c..06ef7e9 100644 (file)
@@ -35,21 +35,6 @@ struct tcf_common {
 #define tcf_lock       common.tcfc_lock
 #define tcf_rcu                common.tcfc_rcu
 
-struct tcf_police {
-       struct tcf_common       common;
-       int                     tcfp_result;
-       u32                     tcfp_ewma_rate;
-       u32                     tcfp_burst;
-       u32                     tcfp_mtu;
-       u32                     tcfp_toks;
-       u32                     tcfp_ptoks;
-       psched_time_t           tcfp_t_c;
-       struct qdisc_rate_table *tcfp_R_tab;
-       struct qdisc_rate_table *tcfp_P_tab;
-};
-#define to_police(pc)  \
-       container_of(pc, struct tcf_police, common)
-
 struct tcf_hashinfo {
        struct tcf_common       **htab;
        unsigned int            hmask;
index 42f2176..487b54c 100644 (file)
@@ -23,6 +23,7 @@ enum amp_mgr_state {
        READ_LOC_AMP_INFO,
        READ_LOC_AMP_ASSOC,
        READ_LOC_AMP_ASSOC_FINAL,
+       WRITE_REMOTE_AMP_ASSOC,
 };
 
 struct amp_mgr {
@@ -33,7 +34,7 @@ struct amp_mgr {
        struct kref             kref;
        __u8                    ident;
        __u8                    handle;
-       enum amp_mgr_state      state;
+       unsigned long           state;
        unsigned long           flags;
 
        struct list_head        amp_ctrls;
@@ -144,5 +145,6 @@ void a2mp_discover_amp(struct l2cap_chan *chan);
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
 void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
 void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
 
 #endif /* __A2MP_H */
index 2554b3f..9531bee 100644 (file)
@@ -166,6 +166,29 @@ typedef struct {
 #define BDADDR_LE_PUBLIC       0x01
 #define BDADDR_LE_RANDOM       0x02
 
+static inline bool bdaddr_type_is_valid(__u8 type)
+{
+       switch (type) {
+       case BDADDR_BREDR:
+       case BDADDR_LE_PUBLIC:
+       case BDADDR_LE_RANDOM:
+               return true;
+       }
+
+       return false;
+}
+
+static inline bool bdaddr_type_is_le(__u8 type)
+{
+       switch (type) {
+       case BDADDR_LE_PUBLIC:
+       case BDADDR_LE_RANDOM:
+               return true;
+       }
+
+       return false;
+}
+
 #define BDADDR_ANY   (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
 #define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
 
index 45eee08..7f12c25 100644 (file)
@@ -943,6 +943,12 @@ struct hci_rp_le_read_buffer_size {
        __u8     le_max_pkt;
 } __packed;
 
+#define HCI_OP_LE_READ_LOCAL_FEATURES  0x2003
+struct hci_rp_le_read_local_features {
+       __u8     status;
+       __u8     features[8];
+} __packed;
+
 #define HCI_OP_LE_READ_ADV_TX_POWER    0x2007
 struct hci_rp_le_read_adv_tx_power {
        __u8    status;
@@ -995,6 +1001,12 @@ struct hci_cp_le_create_conn {
 
 #define HCI_OP_LE_CREATE_CONN_CANCEL   0x200e
 
+#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
+struct hci_rp_le_read_white_list_size {
+       __u8    status;
+       __u8    size;
+} __packed;
+
 #define HCI_OP_LE_CONN_UPDATE          0x2013
 struct hci_cp_le_conn_update {
        __le16   handle;
@@ -1033,6 +1045,12 @@ struct hci_rp_le_ltk_neg_reply {
        __le16  handle;
 } __packed;
 
+#define HCI_OP_LE_READ_SUPPORTED_STATES        0x201c
+struct hci_rp_le_read_supported_states {
+       __u8    status;
+       __u8    le_states[8];
+} __packed;
+
 /* ---- HCI Events ---- */
 #define HCI_EV_INQUIRY_COMPLETE                0x01
 
index 014a2ea..90cf75a 100644 (file)
@@ -86,6 +86,7 @@ struct bdaddr_list {
 struct bt_uuid {
        struct list_head list;
        u8 uuid[16];
+       u8 size;
        u8 svc_hint;
 };
 
@@ -152,6 +153,9 @@ struct hci_dev {
        __u8            minor_class;
        __u8            features[8];
        __u8            host_features[8];
+       __u8            le_features[8];
+       __u8            le_white_list_size;
+       __u8            le_states[8];
        __u8            commands[64];
        __u8            hci_ver;
        __u16           hci_rev;
@@ -216,6 +220,7 @@ struct hci_dev {
        unsigned long   le_last_tx;
 
        struct workqueue_struct *workqueue;
+       struct workqueue_struct *req_workqueue;
 
        struct work_struct      power_on;
        struct delayed_work     power_off;
index 7588ef4..cdd3302 100644 (file)
@@ -496,7 +496,6 @@ struct l2cap_chan {
        __u16           frames_sent;
        __u16           unacked_frames;
        __u8            retry_count;
-       __u16           srej_queue_next;
        __u16           sdu_len;
        struct sk_buff  *sdu;
        struct sk_buff  *sdu_last_frag;
index 1b9830e..d581c6d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/nl80211.h>
 #include <linux/if_ether.h>
 #include <linux/ieee80211.h>
+#include <linux/net.h>
 #include <net/regulatory.h>
 
 /**
@@ -99,6 +100,16 @@ enum ieee80211_band {
  * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
  *     is not permitted.
  * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
+ * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
+ *     this flag indicates that an 80 MHz channel cannot use this
+ *     channel as the control or any of the secondary channels.
+ *     This may be due to the driver or due to regulatory bandwidth
+ *     restrictions.
+ * @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band,
+ *     this flag indicates that an 160 MHz channel cannot use this
+ *     channel as the control or any of the secondary channels.
+ *     This may be due to the driver or due to regulatory bandwidth
+ *     restrictions.
  */
 enum ieee80211_channel_flags {
        IEEE80211_CHAN_DISABLED         = 1<<0,
@@ -108,11 +119,16 @@ enum ieee80211_channel_flags {
        IEEE80211_CHAN_NO_HT40PLUS      = 1<<4,
        IEEE80211_CHAN_NO_HT40MINUS     = 1<<5,
        IEEE80211_CHAN_NO_OFDM          = 1<<6,
+       IEEE80211_CHAN_NO_80MHZ         = 1<<7,
+       IEEE80211_CHAN_NO_160MHZ        = 1<<8,
 };
 
 #define IEEE80211_CHAN_NO_HT40 \
        (IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
 
+#define IEEE80211_DFS_MIN_CAC_TIME_MS          60000
+#define IEEE80211_DFS_MIN_NOP_TIME_MS          (30 * 60 * 1000)
+
 /**
  * struct ieee80211_channel - channel definition
  *
@@ -133,6 +149,9 @@ enum ieee80211_channel_flags {
  *     to enable this, this is useful only on 5 GHz band.
  * @orig_mag: internal use
  * @orig_mpwr: internal use
+ * @dfs_state: current state of this channel. Only relevant if radar is required
+ *     on this channel.
+ * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
  */
 struct ieee80211_channel {
        enum ieee80211_band band;
@@ -145,6 +164,8 @@ struct ieee80211_channel {
        bool beacon_found;
        u32 orig_flags;
        int orig_mag, orig_mpwr;
+       enum nl80211_dfs_state dfs_state;
+       unsigned long dfs_state_entered;
 };
 
 /**
@@ -527,6 +548,26 @@ struct cfg80211_beacon_data {
        size_t probe_resp_len;
 };
 
+struct mac_address {
+       u8 addr[ETH_ALEN];
+};
+
+/**
+ * struct cfg80211_acl_data - Access control list data
+ *
+ * @acl_policy: ACL policy to be applied on the station's
+ *     entry specified by mac_addr
+ * @n_acl_entries: Number of MAC address entries passed
+ * @mac_addrs: List of MAC addresses of stations to be used for ACL
+ */
+struct cfg80211_acl_data {
+       enum nl80211_acl_policy acl_policy;
+       int n_acl_entries;
+
+       /* Keep it last */
+       struct mac_address mac_addrs[];
+};
+
 /**
  * struct cfg80211_ap_settings - AP configuration
  *
@@ -546,6 +587,9 @@ struct cfg80211_beacon_data {
  * @inactivity_timeout: time in seconds to determine station's inactivity.
  * @p2p_ctwindow: P2P CT Window
  * @p2p_opp_ps: P2P opportunistic PS
+ * @acl: ACL configuration used by the drivers which has support for
+ *     MAC address based access control
+ * @radar_required: set if radar detection is required
  */
 struct cfg80211_ap_settings {
        struct cfg80211_chan_def chandef;
@@ -562,6 +606,8 @@ struct cfg80211_ap_settings {
        int inactivity_timeout;
        u8 p2p_ctwindow;
        bool p2p_opp_ps;
+       const struct cfg80211_acl_data *acl;
+       bool radar_required;
 };
 
 /**
@@ -580,12 +626,14 @@ enum plink_actions {
 /**
  * enum station_parameters_apply_mask - station parameter values to apply
  * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
+ * @STATION_PARAM_APPLY_CAPABILITY: apply new capability
  *
  * Not all station parameters have in-band "no change" signalling,
  * for those that don't these flags will are used.
  */
 enum station_parameters_apply_mask {
        STATION_PARAM_APPLY_UAPSD = BIT(0),
+       STATION_PARAM_APPLY_CAPABILITY = BIT(1),
 };
 
 /**
@@ -616,6 +664,9 @@ enum station_parameters_apply_mask {
  *     see &enum station_parameters_apply_mask
  * @local_pm: local link-specific mesh power save mode (no change when set
  *     to unknown)
+ * @capability: station capability
+ * @ext_capab: extended capabilities of the station
+ * @ext_capab_len: number of extended capabilities
  */
 struct station_parameters {
        u8 *supported_rates;
@@ -632,6 +683,9 @@ struct station_parameters {
        u8 uapsd_queues;
        u8 max_sp;
        enum nl80211_mesh_power_mode local_pm;
+       u16 capability;
+       u8 *ext_capab;
+       u8 ext_capab_len;
 };
 
 /**
@@ -643,14 +697,16 @@ struct station_parameters {
  * @STATION_INFO_INACTIVE_TIME: @inactive_time filled
  * @STATION_INFO_RX_BYTES: @rx_bytes filled
  * @STATION_INFO_TX_BYTES: @tx_bytes filled
+ * @STATION_INFO_RX_BYTES64: @rx_bytes filled with 64-bit value
+ * @STATION_INFO_TX_BYTES64: @tx_bytes filled with 64-bit value
  * @STATION_INFO_LLID: @llid filled
  * @STATION_INFO_PLID: @plid filled
  * @STATION_INFO_PLINK_STATE: @plink_state filled
  * @STATION_INFO_SIGNAL: @signal filled
  * @STATION_INFO_TX_BITRATE: @txrate fields are filled
  *  (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
- * @STATION_INFO_RX_PACKETS: @rx_packets filled
- * @STATION_INFO_TX_PACKETS: @tx_packets filled
+ * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
+ * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
  * @STATION_INFO_TX_RETRIES: @tx_retries filled
  * @STATION_INFO_TX_FAILED: @tx_failed filled
  * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
@@ -691,6 +747,8 @@ enum station_info_flags {
        STATION_INFO_LOCAL_PM           = 1<<21,
        STATION_INFO_PEER_PM            = 1<<22,
        STATION_INFO_NONPEER_PM         = 1<<23,
+       STATION_INFO_RX_BYTES64         = 1<<24,
+       STATION_INFO_TX_BYTES64         = 1<<25,
 };
 
 /**
@@ -812,8 +870,8 @@ struct station_info {
        u32 filled;
        u32 connected_time;
        u32 inactive_time;
-       u32 rx_bytes;
-       u32 tx_bytes;
+       u64 rx_bytes;
+       u64 tx_bytes;
        u16 llid;
        u16 plid;
        u8 plink_state;
@@ -1199,6 +1257,7 @@ struct cfg80211_match_set {
  * @n_match_sets: number of match sets
  * @wiphy: the wiphy this was for
  * @dev: the interface
+ * @scan_start: start time of the scheduled scan
  * @channels: channels to scan
  * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
  */
@@ -1238,11 +1297,13 @@ enum cfg80211_signal_type {
 
 /**
  * struct cfg80211_bss_ie_data - BSS entry IE data
+ * @tsf: TSF contained in the frame that carried these IEs
  * @rcu_head: internal use, for freeing
  * @len: length of the IEs
  * @data: IE data
  */
 struct cfg80211_bss_ies {
+       u64 tsf;
        struct rcu_head rcu_head;
        int len;
        u8 data[];
@@ -1256,29 +1317,32 @@ struct cfg80211_bss_ies {
  *
  * @channel: channel this BSS is on
  * @bssid: BSSID of the BSS
- * @tsf: timestamp of last received update
  * @beacon_interval: the beacon interval as from the frame
  * @capability: the capability field in host byte order
- * @ies: the information elements (Note that there
- *     is no guarantee that these are well-formed!); this is a pointer to
- *     either the beacon_ies or proberesp_ies depending on whether Probe
- *     Response frame has been received
+ * @ies: the information elements (Note that there is no guarantee that these
+ *     are well-formed!); this is a pointer to either the beacon_ies or
+ *     proberesp_ies depending on whether Probe Response frame has been
+ *     received. It is always non-%NULL.
  * @beacon_ies: the information elements from the last Beacon frame
+ *     (implementation note: if @hidden_beacon_bss is set this struct doesn't
+ *     own the beacon_ies, but they're just pointers to the ones from the
+ *     @hidden_beacon_bss struct)
  * @proberesp_ies: the information elements from the last Probe Response frame
+ * @hidden_beacon_bss: in case this BSS struct represents a probe response from
+ *     a BSS that hides the SSID in its beacon, this points to the BSS struct
+ *     that holds the beacon data. @beacon_ies is still valid, of course, and
+ *     points to the same data as hidden_beacon_bss->beacon_ies in that case.
  * @signal: signal strength value (type depends on the wiphy's signal_type)
- * @free_priv: function pointer to free private data
  * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
  */
 struct cfg80211_bss {
-       u64 tsf;
-
        struct ieee80211_channel *channel;
 
        const struct cfg80211_bss_ies __rcu *ies;
        const struct cfg80211_bss_ies __rcu *beacon_ies;
        const struct cfg80211_bss_ies __rcu *proberesp_ies;
 
-       void (*free_priv)(struct cfg80211_bss *bss);
+       struct cfg80211_bss *hidden_beacon_bss;
 
        s32 signal;
 
@@ -1380,6 +1444,8 @@ struct cfg80211_assoc_request {
  * @ie: Extra IEs to add to Deauthentication frame or %NULL
  * @ie_len: Length of ie buffer in octets
  * @reason_code: The reason code for the deauthentication
+ * @local_state_change: if set, change local state only and
+ *     do not set a deauth frame
  */
 struct cfg80211_deauth_request {
        const u8 *bssid;
@@ -1541,6 +1607,7 @@ struct cfg80211_pmksa {
  *     one bit per byte, in same format as nl80211
  * @pattern: bytes to match where bitmask is 1
  * @pattern_len: length of pattern (in bytes)
+ * @pkt_offset: packet offset (in bytes)
  *
  * Internal note: @mask and @pattern are allocated in one chunk of
  * memory, free @mask only!
@@ -1548,6 +1615,42 @@ struct cfg80211_pmksa {
 struct cfg80211_wowlan_trig_pkt_pattern {
        u8 *mask, *pattern;
        int pattern_len;
+       int pkt_offset;
+};
+
+/**
+ * struct cfg80211_wowlan_tcp - TCP connection parameters
+ *
+ * @sock: (internal) socket for source port allocation
+ * @src: source IP address
+ * @dst: destination IP address
+ * @dst_mac: destination MAC address
+ * @src_port: source port
+ * @dst_port: destination port
+ * @payload_len: data payload length
+ * @payload: data payload buffer
+ * @payload_seq: payload sequence stamping configuration
+ * @data_interval: interval at which to send data packets
+ * @wake_len: wakeup payload match length
+ * @wake_data: wakeup payload match data
+ * @wake_mask: wakeup payload match mask
+ * @tokens_size: length of the tokens buffer
+ * @payload_tok: payload token usage configuration
+ */
+struct cfg80211_wowlan_tcp {
+       struct socket *sock;
+       __be32 src, dst;
+       u16 src_port, dst_port;
+       u8 dst_mac[ETH_ALEN];
+       int payload_len;
+       const u8 *payload;
+       struct nl80211_wowlan_tcp_data_seq payload_seq;
+       u32 data_interval;
+       u32 wake_len;
+       const u8 *wake_data, *wake_mask;
+       u32 tokens_size;
+       /* must be last, variable member */
+       struct nl80211_wowlan_tcp_data_token payload_tok;
 };
 
 /**
@@ -1564,16 +1667,49 @@ struct cfg80211_wowlan_trig_pkt_pattern {
  * @eap_identity_req: wake up on EAP identity request packet
  * @four_way_handshake: wake up on 4-way handshake
  * @rfkill_release: wake up when rfkill is released
+ * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h.
+ *     NULL if not configured.
  */
 struct cfg80211_wowlan {
        bool any, disconnect, magic_pkt, gtk_rekey_failure,
             eap_identity_req, four_way_handshake,
             rfkill_release;
        struct cfg80211_wowlan_trig_pkt_pattern *patterns;
+       struct cfg80211_wowlan_tcp *tcp;
        int n_patterns;
 };
 
 /**
+ * struct cfg80211_wowlan_wakeup - wakeup report
+ * @disconnect: woke up by getting disconnected
+ * @magic_pkt: woke up by receiving magic packet
+ * @gtk_rekey_failure: woke up by GTK rekey failure
+ * @eap_identity_req: woke up by EAP identity request packet
+ * @four_way_handshake: woke up by 4-way handshake
+ * @rfkill_release: woke up by rfkill being released
+ * @pattern_idx: pattern that caused wakeup, -1 if not due to pattern
+ * @packet_present_len: copied wakeup packet data
+ * @packet_len: original wakeup packet length
+ * @packet: The packet causing the wakeup, if any.
+ * @packet_80211:  For pattern match, magic packet and other data
+ *     frame triggers an 802.3 frame should be reported, for
+ *     disconnect due to deauth 802.11 frame. This indicates which
+ *     it is.
+ * @tcp_match: TCP wakeup packet received
+ * @tcp_connlost: TCP connection lost or failed to establish
+ * @tcp_nomoretokens: TCP data ran out of tokens
+ */
+struct cfg80211_wowlan_wakeup {
+       bool disconnect, magic_pkt, gtk_rekey_failure,
+            eap_identity_req, four_way_handshake,
+            rfkill_release, packet_80211,
+            tcp_match, tcp_connlost, tcp_nomoretokens;
+       s32 pattern_idx;
+       u32 packet_present_len, packet_len;
+       const void *packet;
+};
+
+/**
  * struct cfg80211_gtk_rekey_data - rekey data
  * @kek: key encryption key
  * @kck: key confirmation key
@@ -1796,6 +1932,15 @@ struct cfg80211_gtk_rekey_data {
  *
  * @start_p2p_device: Start the given P2P device.
  * @stop_p2p_device: Stop the given P2P device.
+ *
+ * @set_mac_acl: Sets MAC address control list in AP and P2P GO mode.
+ *     Parameters include ACL policy, an array of MAC address of stations
+ *     and the number of MAC addresses. If there is already a list in driver
+ *     this new list replaces the existing one. Driver has to clear its ACL
+ *     when number of MAC addresses entries is passed as 0. Drivers which
+ *     advertise the support for MAC based ACL have to implement this callback.
+ *
+ * @start_radar_detection: Start radar detection in the driver.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2016,6 +2161,13 @@ struct cfg80211_ops {
                                    struct wireless_dev *wdev);
        void    (*stop_p2p_device)(struct wiphy *wiphy,
                                   struct wireless_dev *wdev);
+
+       int     (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev,
+                              const struct cfg80211_acl_data *params);
+
+       int     (*start_radar_detection)(struct wiphy *wiphy,
+                                        struct net_device *dev,
+                                        struct cfg80211_chan_def *chandef);
 };
 
 /*
@@ -2181,10 +2333,6 @@ struct ieee80211_iface_combination {
        u8 radar_detect_widths;
 };
 
-struct mac_address {
-       u8 addr[ETH_ALEN];
-};
-
 struct ieee80211_txrx_stypes {
        u16 tx, rx;
 };
@@ -2216,6 +2364,14 @@ enum wiphy_wowlan_support_flags {
        WIPHY_WOWLAN_RFKILL_RELEASE     = BIT(7),
 };
 
+struct wiphy_wowlan_tcp_support {
+       const struct nl80211_wowlan_tcp_data_token_feature *tok;
+       u32 data_payload_max;
+       u32 data_interval_max;
+       u32 wake_payload_max;
+       bool seq;
+};
+
 /**
  * struct wiphy_wowlan_support - WoWLAN support data
  * @flags: see &enum wiphy_wowlan_support_flags
@@ -2223,12 +2379,16 @@ enum wiphy_wowlan_support_flags {
  *     (see nl80211.h for the pattern definition)
  * @pattern_max_len: maximum length of each pattern
  * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ * @tcp: TCP wakeup support information
  */
 struct wiphy_wowlan_support {
        u32 flags;
        int n_patterns;
        int pattern_max_len;
        int pattern_min_len;
+       int max_pkt_offset;
+       const struct wiphy_wowlan_tcp_support *tcp;
 };
 
 /**
@@ -2325,6 +2485,17 @@ struct wiphy_wowlan_support {
  * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
  * @ht_capa_mod_mask:  Specify what ht_cap values can be over-ridden.
  *     If null, then none can be over-ridden.
+ *
+ * @max_acl_mac_addrs: Maximum number of MAC addresses that the device
+ *     supports for ACL.
+ *
+ * @extended_capabilities: extended capabilities supported by the driver,
+ *     additional capabilities might be supported by userspace; these are
+ *     the 802.11 extended capabilities ("Extended Capabilities element")
+ *     and are in the same format as in the information element. See
+ *     802.11-2012 8.4.2.29 for the defined fields.
+ * @extended_capabilities_mask: mask of the valid values
+ * @extended_capabilities_len: length of the extended capabilities
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -2346,6 +2517,8 @@ struct wiphy {
        /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
        u16 interface_modes;
 
+       u16 max_acl_mac_addrs;
+
        u32 flags, features;
 
        u32 ap_sme_capa;
@@ -2389,6 +2562,9 @@ struct wiphy {
         */
        u32 probe_resp_offload;
 
+       const u8 *extended_capabilities, *extended_capabilities_mask;
+       u8 extended_capabilities_len;
+
        /* If multiple wiphys are registered and you're handed e.g.
         * a regular netdev with assigned ieee80211_ptr, you won't
         * know whether it points to a wiphy your driver has registered
@@ -2568,7 +2744,6 @@ struct cfg80211_cached_keys;
  *     the user-set AP, monitor and WDS channel
  * @preset_chan: (private) Used by the internal configuration code to
  *     track the channel to be used for AP later
- * @preset_chantype: (private) the corresponding channel type
  * @bssid: (private) Used by the internal configuration code
  * @ssid: (private) Used by the internal configuration code
  * @ssid_len: (private) Used by the internal configuration code
@@ -2587,6 +2762,8 @@ struct cfg80211_cached_keys;
  *     beacons, 0 when not valid
  * @address: The address for this device, valid only if @netdev is %NULL
  * @p2p_started: true if this is a P2P Device that has been started
+ * @cac_started: true if DFS channel availability check has been started
+ * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -2638,6 +2815,9 @@ struct wireless_dev {
 
        u32 ap_unexpected_nlportid;
 
+       bool cac_started;
+       unsigned long cac_start_time;
+
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
        struct {
@@ -3103,25 +3283,23 @@ cfg80211_get_ibss(struct wiphy *wiphy,
                                WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
 }
 
-struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
-                                      struct ieee80211_channel *channel,
-                                      const u8 *meshid, size_t meshidlen,
-                                      const u8 *meshcfg);
 /**
  * cfg80211_ref_bss - reference BSS struct
+ * @wiphy: the wiphy this BSS struct belongs to
  * @bss: the BSS struct to reference
  *
  * Increments the refcount of the given BSS struct.
  */
-void cfg80211_ref_bss(struct cfg80211_bss *bss);
+void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
 
 /**
  * cfg80211_put_bss - unref BSS struct
+ * @wiphy: the wiphy this BSS struct belongs to
  * @bss: the BSS struct
  *
  * Decrements the refcount of the given BSS struct.
  */
-void cfg80211_put_bss(struct cfg80211_bss *bss);
+void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
 
 /**
  * cfg80211_unlink_bss - unlink BSS from internal data structures
@@ -3629,6 +3807,31 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
                              gfp_t gfp);
 
 /**
+ * cfg80211_radar_event - radar detection event
+ * @wiphy: the wiphy
+ * @chandef: chandef for the current channel
+ * @gfp: context flags
+ *
+ * This function is called when a radar is detected on the current chanenl.
+ */
+void cfg80211_radar_event(struct wiphy *wiphy,
+                         struct cfg80211_chan_def *chandef, gfp_t gfp);
+
+/**
+ * cfg80211_cac_event - Channel availability check (CAC) event
+ * @netdev: network device
+ * @event: type of event
+ * @gfp: context flags
+ *
+ * This function is called when a Channel availability check (CAC) is finished
+ * or aborted. This must be called to notify the completion of a CAC process,
+ * also by full-MAC drivers.
+ */
+void cfg80211_cac_event(struct net_device *netdev,
+                       enum nl80211_radar_event event, gfp_t gfp);
+
+
+/**
  * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
  * @dev: network device
  * @peer: peer's MAC address
@@ -3818,6 +4021,21 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
                          enum ieee80211_p2p_attr_id attr,
                          u8 *buf, unsigned int bufsize);
 
+/**
+ * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
+ * @wdev: the wireless device reporting the wakeup
+ * @wakeup: the wakeup report
+ * @gfp: allocation flags
+ *
+ * This function reports that the given device woke up. If it
+ * caused the wakeup, report the reason(s), otherwise you may
+ * pass %NULL as the @wakeup parameter to advertise that something
+ * else caused the wakeup.
+ */
+void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
+                                  struct cfg80211_wowlan_wakeup *wakeup,
+                                  gfp_t gfp);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index 9a78810..3da47e0 100644 (file)
@@ -61,6 +61,7 @@ struct dst_entry {
 #define DST_NOPEER             0x0040
 #define DST_FAKE_RTABLE                0x0080
 #define DST_XFRM_TUNNEL                0x0100
+#define DST_XFRM_QUEUE         0x0200
 
        unsigned short          pending_confirm;
 
index 3037f49..f7eba13 100644 (file)
@@ -147,10 +147,12 @@ struct ieee80211_low_level_stats {
  * enum ieee80211_chanctx_change - change flag for channel context
  * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
  * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
+ * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
  */
 enum ieee80211_chanctx_change {
        IEEE80211_CHANCTX_CHANGE_WIDTH          = BIT(0),
        IEEE80211_CHANCTX_CHANGE_RX_CHAINS      = BIT(1),
+       IEEE80211_CHANCTX_CHANGE_RADAR          = BIT(2),
 };
 
 /**
@@ -165,6 +167,7 @@ enum ieee80211_chanctx_change {
  * @rx_chains_dynamic: The number of RX chains that must be enabled
  *     after RTS/CTS handshake to receive SMPS MIMO transmissions;
  *     this will always be >= @rx_chains_static.
+ * @radar_enabled: whether radar detection is enabled on this channel.
  * @drv_priv: data area for driver use, will always be aligned to
  *     sizeof(void *), size is determined in hw information.
  */
@@ -173,6 +176,8 @@ struct ieee80211_chanctx_conf {
 
        u8 rx_chains_static, rx_chains_dynamic;
 
+       bool radar_enabled;
+
        u8 drv_priv[0] __aligned(sizeof(void *));
 };
 
@@ -208,6 +213,11 @@ struct ieee80211_chanctx_conf {
  * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
  * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
  *     changed (currently only in P2P client mode, GO mode will be later)
+ * @BSS_CHANGED_DTIM_PERIOD: the DTIM period value was changed (set when
+ *     it becomes valid, managed mode only)
+ * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
+ *     note that this is only called when it changes after the channel
+ *     context had been assigned.
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -230,6 +240,8 @@ enum ieee80211_bss_change {
        BSS_CHANGED_PS                  = 1<<17,
        BSS_CHANGED_TXPOWER             = 1<<18,
        BSS_CHANGED_P2P_PS              = 1<<19,
+       BSS_CHANGED_DTIM_PERIOD         = 1<<20,
+       BSS_CHANGED_BANDWIDTH           = 1<<21,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -271,13 +283,19 @@ enum ieee80211_rssi_event {
  *     if the hardware cannot handle this it must set the
  *     IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
  * @dtim_period: num of beacons before the next DTIM, for beaconing,
- *     valid in station mode only while @assoc is true and if also
- *     requested by %IEEE80211_HW_NEED_DTIM_PERIOD (cf. also hw conf
- *     @ps_dtim_period)
+ *     valid in station mode only if after the driver was notified
+ *     with the %BSS_CHANGED_DTIM_PERIOD flag, will be non-zero then.
  * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old
- *     as it may have been received during scanning long ago)
+ *     as it may have been received during scanning long ago). If the
+ *     HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
+ *     only come from a beacon, but might not become valid until after
+ *     association when a beacon is received (which is notified with the
+ *     %BSS_CHANGED_DTIM flag.)
  * @sync_device_ts: the device timestamp corresponding to the sync_tsf,
  *     the driver/device can use this to calculate synchronisation
+ *     (see @sync_tsf)
+ * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
+ *     is requested, see @sync_tsf/@sync_device_ts.
  * @beacon_int: beacon interval
  * @assoc_capability: capabilities taken from assoc resp
  * @basic_rates: bitmap of basic rates, each bit stands for an
@@ -297,11 +315,9 @@ enum ieee80211_rssi_event {
  *     may filter ARP queries targeted for other addresses than listed here.
  *     The driver must allow ARP queries targeted for all address listed here
  *     to pass through. An empty list implies no ARP queries need to pass.
- * @arp_addr_cnt: Number of addresses currently on the list.
- * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may
- *     filter ARP queries based on the @arp_addr_list, if disabled, the
- *     hardware must not perform any ARP filtering. Note, that the filter will
- *     be enabled also in promiscuous mode.
+ * @arp_addr_cnt: Number of addresses currently on the list. Note that this
+ *     may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
+ *     array size), it's up to the driver what to do in that case.
  * @qos: This is a QoS-enabled BSS.
  * @idle: This interface is idle. There's also a global idle flag in the
  *     hardware config which may be more appropriate depending on what
@@ -331,6 +347,7 @@ struct ieee80211_bss_conf {
        u16 assoc_capability;
        u64 sync_tsf;
        u32 sync_device_ts;
+       u8 sync_dtim_count;
        u32 basic_rates;
        int mcast_rate[IEEE80211_NUM_BANDS];
        u16 ht_operation_mode;
@@ -338,8 +355,7 @@ struct ieee80211_bss_conf {
        u32 cqm_rssi_hyst;
        struct cfg80211_chan_def chandef;
        __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
-       u8 arp_addr_cnt;
-       bool arp_filter_enabled;
+       int arp_addr_cnt;
        bool qos;
        bool idle;
        bool ps;
@@ -392,6 +408,9 @@ struct ieee80211_bss_conf {
  * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
  *     set by rate control algorithms to indicate probe rate, will
  *     be cleared for fragmented frames (except on the last fragment)
+ * @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
+ *     that a frame can be transmitted while the queues are stopped for
+ *     off-channel operation.
  * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
  *     used to indicate that a pending frame requires TX processing before
  *     it can be sent out.
@@ -409,6 +428,9 @@ struct ieee80211_bss_conf {
  * @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted
  *     after TX status because the destination was asleep, it must not
  *     be modified again (no seqno assignment, crypto, etc.)
+ * @IEEE80211_TX_INTFL_MLME_CONN_TX: This frame was transmitted by the MLME
+ *     code for connection establishment, this indicates that its status
+ *     should kick the MLME state machine.
  * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211
  *     MLME command (internal to mac80211 to figure out whether to send TX
  *     status to user space)
@@ -454,13 +476,14 @@ enum mac80211_tx_control_flags {
        IEEE80211_TX_STAT_AMPDU                 = BIT(10),
        IEEE80211_TX_STAT_AMPDU_NO_BACK         = BIT(11),
        IEEE80211_TX_CTL_RATE_CTRL_PROBE        = BIT(12),
+       IEEE80211_TX_INTFL_OFFCHAN_TX_OK        = BIT(13),
        IEEE80211_TX_INTFL_NEED_TXPROCESSING    = BIT(14),
        IEEE80211_TX_INTFL_RETRIED              = BIT(15),
        IEEE80211_TX_INTFL_DONT_ENCRYPT         = BIT(16),
        IEEE80211_TX_CTL_NO_PS_BUFFER           = BIT(17),
        IEEE80211_TX_CTL_MORE_FRAMES            = BIT(18),
        IEEE80211_TX_INTFL_RETRANSMISSION       = BIT(19),
-       /* hole at 20, use later */
+       IEEE80211_TX_INTFL_MLME_CONN_TX         = BIT(20),
        IEEE80211_TX_INTFL_NL80211_FRAME_TX     = BIT(21),
        IEEE80211_TX_CTL_LDPC                   = BIT(22),
        IEEE80211_TX_CTL_STBC                   = BIT(23) | BIT(24),
@@ -953,6 +976,7 @@ enum ieee80211_smps_mode {
  *
  * @channel: the channel to tune to
  * @channel_type: the channel (HT) type
+ * @radar_enabled: whether radar detection is enabled
  *
  * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
  *    (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
@@ -979,6 +1003,7 @@ struct ieee80211_conf {
 
        struct ieee80211_channel *channel;
        enum nl80211_channel_type channel_type;
+       bool radar_enabled;
        enum ieee80211_smps_mode smps_mode;
 };
 
@@ -1176,6 +1201,24 @@ enum ieee80211_sta_state {
 };
 
 /**
+ * enum ieee80211_sta_rx_bandwidth - station RX bandwidth
+ * @IEEE80211_STA_RX_BW_20: station can only receive 20 MHz
+ * @IEEE80211_STA_RX_BW_40: station can receive up to 40 MHz
+ * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz
+ * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz
+ *     (including 80+80 MHz)
+ *
+ * Implementation note: IEEE80211_STA_RX_BW_20 must be zero to be
+ *     initialized correctly; the values must be sorted.
+ */
+enum ieee80211_sta_rx_bandwidth {
+       IEEE80211_STA_RX_BW_20 = 0,
+       IEEE80211_STA_RX_BW_40,
+       IEEE80211_STA_RX_BW_80,
+       IEEE80211_STA_RX_BW_160,
+};
+
+/**
  * struct ieee80211_sta - station table entry
  *
  * A station table entry represents a station we are possibly
@@ -1197,6 +1240,12 @@ enum ieee80211_sta_state {
  * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
  *     if wme is supported.
  * @max_sp: max Service Period. Only valid if wme is supported.
+ * @bandwidth: current bandwidth the station can receive with
+ * @rx_nss: in HT/VHT, the maximum number of spatial streams the
+ *     station can receive at the moment, changed by operating mode
+ *     notifications and capabilities. The value is only valid after
+ *     the station moves to associated state.
+ * @smps_mode: current SMPS mode (off, static or dynamic)
  */
 struct ieee80211_sta {
        u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1207,6 +1256,9 @@ struct ieee80211_sta {
        bool wme;
        u8 uapsd_queues;
        u8 max_sp;
+       u8 rx_nss;
+       enum ieee80211_sta_rx_bandwidth bandwidth;
+       enum ieee80211_smps_mode smps_mode;
 
        /* must be last */
        u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1331,9 +1383,9 @@ struct ieee80211_tx_control {
  *      When this flag is set, signaling beacon-loss will cause an immediate
  *      change to disassociated state.
  *
- * @IEEE80211_HW_NEED_DTIM_PERIOD:
- *     This device needs to know the DTIM period for the BSS before
- *     associating.
+ * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
+ *     This device needs to get data from beacon before association (i.e.
+ *     dtim_period).
  *
  * @IEEE80211_HW_SUPPORTS_PER_STA_GTK: The device's crypto engine supports
  *     per-station GTKs as used by IBSS RSN or during fast transition. If
@@ -1353,10 +1405,6 @@ struct ieee80211_tx_control {
  *     setup strictly in HW. mac80211 should not attempt to do this in
  *     software.
  *
- * @IEEE80211_HW_SCAN_WHILE_IDLE: The device can do hw scan while
- *     being idle (i.e. mac80211 doesn't have to go idle-off during the
- *     the scan).
- *
  * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of
  *     a virtual monitor interface when monitor interfaces are the only
  *     active interfaces.
@@ -1370,9 +1418,8 @@ struct ieee80211_tx_control {
  *     P2P Interface. This will be honoured even if more than one interface
  *     is supported.
  *
- * @IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL: On this hardware TX BA session
- *     should be tear down once BAR frame will not be acked.
- *
+ * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
+ *     only, to allow getting TBTT of a DTIM beacon.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1382,7 +1429,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE      = 1<<4,
        IEEE80211_HW_SIGNAL_UNSPEC                      = 1<<5,
        IEEE80211_HW_SIGNAL_DBM                         = 1<<6,
-       IEEE80211_HW_NEED_DTIM_PERIOD                   = 1<<7,
+       IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC             = 1<<7,
        IEEE80211_HW_SPECTRUM_MGMT                      = 1<<8,
        IEEE80211_HW_AMPDU_AGGREGATION                  = 1<<9,
        IEEE80211_HW_SUPPORTS_PS                        = 1<<10,
@@ -1399,9 +1446,8 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_SUPPORTS_PER_STA_GTK               = 1<<21,
        IEEE80211_HW_AP_LINK_PS                         = 1<<22,
        IEEE80211_HW_TX_AMPDU_SETUP_IN_HW               = 1<<23,
-       IEEE80211_HW_SCAN_WHILE_IDLE                    = 1<<24,
        IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF              = 1<<25,
-       IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL          = 1<<26,
+       IEEE80211_HW_TIMING_BEACON_ONLY                 = 1<<26,
 };
 
 /**
@@ -1630,6 +1676,10 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
  * provided by update_tkip_key only. The trigger that makes mac80211 call this
  * handler is software decryption with wrap around of iv16.
+ *
+ * The set_default_unicast_key() call updates the default WEP key index
+ * configured to the hardware for WEP encryption type. This is required
+ * for devices that support offload of data packets (e.g. ARP responses).
  */
 
 /**
@@ -1682,15 +1732,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS
  * enabled whenever user has enabled powersave.
  *
- * Some hardware need to toggle a single shared antenna between WLAN and
- * Bluetooth to facilitate co-existence. These types of hardware set
- * limitations on the use of host controlled dynamic powersave whenever there
- * is simultaneous WLAN and Bluetooth traffic. For these types of hardware, the
- * driver may request temporarily going into full power save, in order to
- * enable toggling the antenna between BT and WLAN. If the driver requests
- * disabling dynamic powersave, the @dynamic_ps_timeout value will be
- * temporarily set to zero until the driver re-enables dynamic powersave.
- *
  * Driver informs U-APSD client support by enabling
  * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
  * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
@@ -2076,16 +2117,21 @@ enum ieee80211_frame_release_type {
  * enum ieee80211_rate_control_changed - flags to indicate what changed
  *
  * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
- *     to this station changed.
+ *     to this station changed. The actual bandwidth is in the station
+ *     information -- for HT20/40 the IEEE80211_HT_CAP_SUP_WIDTH_20_40
+ *     flag changes, for HT and VHT the bandwidth field changes.
  * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
  * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
  *     changed (in IBSS mode) due to discovering more information about
  *     the peer.
+ * @IEEE80211_RC_NSS_CHANGED: N_SS (number of spatial streams) was changed
+ *     by the peer
  */
 enum ieee80211_rate_control_changed {
        IEEE80211_RC_BW_CHANGED         = BIT(0),
        IEEE80211_RC_SMPS_CHANGED       = BIT(1),
        IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
+       IEEE80211_RC_NSS_CHANGED        = BIT(3),
 };
 
 /**
@@ -2166,6 +2212,18 @@ enum ieee80211_rate_control_changed {
  *     MAC address of the device going away.
  *     Hence, this callback must be implemented. It can sleep.
  *
+ * @add_interface_debugfs: Drivers can use this callback to add debugfs files
+ *     when a vif is added to mac80211. This callback and
+ *     @remove_interface_debugfs should be within a CONFIG_MAC80211_DEBUGFS
+ *     conditional. @remove_interface_debugfs must be provided for cleanup.
+ *     This callback can sleep.
+ *
+ * @remove_interface_debugfs: Remove the debugfs files which were added using
+ *     @add_interface_debugfs. This callback must remove all debugfs entries
+ *     that were added because mac80211 only removes interface debugfs when the
+ *     interface is destroyed, not when it is removed from the driver.
+ *     This callback can sleep.
+ *
  * @config: Handler for configuration requests. IEEE 802.11 code calls this
  *     function to change hardware configuration, e.g., channel.
  *     This function should never fail but returns a negative error code
@@ -2208,6 +2266,10 @@ enum ieee80211_rate_control_changed {
  *     After rekeying was done it should (for example during resume) notify
  *     userspace of the new replay counter using ieee80211_gtk_rekey_notify().
  *
+ * @set_default_unicast_key: Set the default (unicast) key index, useful for
+ *     WEP when the device sends data packets autonomously, e.g. for ARP
+ *     offloading. The index can be 0-3, or -1 for unsetting it.
+ *
  * @hw_scan: Ask the hardware to service the scan request, no need to start
  *     the scan state machine in stack. The scan must honour the channel
  *     configuration done by the regulatory agent in the wiphy's
@@ -2492,6 +2554,9 @@ enum ieee80211_rate_control_changed {
  *     driver's resume function returned 1, as this is just like an "inline"
  *     hardware restart. This callback may sleep.
  *
+ * @ipv6_addr_change: IPv6 address assignment on the given interface changed.
+ *     Currently, this is only called for managed or P2P client interfaces.
+ *     This callback is optional; it must not sleep.
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
@@ -2539,6 +2604,8 @@ struct ieee80211_ops {
        void (*set_rekey_data)(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct cfg80211_gtk_rekey_data *data);
+       void (*set_default_unicast_key)(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif, int idx);
        int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       struct cfg80211_scan_request *req);
        void (*cancel_hw_scan)(struct ieee80211_hw *hw,
@@ -2570,6 +2637,12 @@ struct ieee80211_ops {
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta,
                                   struct dentry *dir);
+       void (*add_interface_debugfs)(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     struct dentry *dir);
+       void (*remove_interface_debugfs)(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        struct dentry *dir);
 #endif
        void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        enum sta_notify_cmd, struct ieee80211_sta *sta);
@@ -2623,6 +2696,7 @@ struct ieee80211_ops {
        int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                                const struct cfg80211_bitrate_mask *mask);
        void (*rssi_callback)(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
                              enum ieee80211_rssi_event rssi_event);
 
        void (*allow_buffered_frames)(struct ieee80211_hw *hw,
@@ -2665,6 +2739,12 @@ struct ieee80211_ops {
                                     struct ieee80211_chanctx_conf *ctx);
 
        void (*restart_complete)(struct ieee80211_hw *hw);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       void (*ipv6_addr_change)(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct inet6_dev *idev);
+#endif
 };
 
 /**
@@ -3860,6 +3940,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
  * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER, and
  * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
  * needs to inform if the connection to the AP has been lost.
+ * The function may also be called if the connection needs to be terminated
+ * for some other reason, even if %IEEE80211_HW_CONNECTION_MONITOR isn't set.
  *
  * This function will cause immediate change to disassociated state,
  * without connection recovery attempts.
@@ -3890,36 +3972,6 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif);
 void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
 
 /**
- * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm
- *
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
- *
- * Some hardware require full power save to manage simultaneous BT traffic
- * on the WLAN frequency. Full PSM is required periodically, whenever there are
- * burst of BT traffic. The hardware gets information of BT traffic via
- * hardware co-existence lines, and consequentially requests mac80211 to
- * (temporarily) enter full psm.
- * This function will only temporarily disable dynamic PS, not enable PSM if
- * it was not already enabled.
- * The driver must make sure to re-enable dynamic PS using
- * ieee80211_enable_dyn_ps() if the driver has disabled it.
- *
- */
-void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif);
-
-/**
- * ieee80211_enable_dyn_ps - restore dynamic psm after being disabled
- *
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
- *
- * This function restores dynamic PS after being temporarily disabled via
- * ieee80211_disable_dyn_ps(). Each ieee80211_disable_dyn_ps() call must
- * be coupled with an eventual call to this function.
- *
- */
-void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif);
-
-/**
  * ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
  *     rssi threshold triggered
  *
@@ -3936,6 +3988,13 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
                               gfp_t gfp);
 
 /**
+ * ieee80211_radar_detected - inform that a radar was detected
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_radar_detected(struct ieee80211_hw *hw);
+
+/**
  * ieee80211_chswitch_done - Complete channel switch process
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
  * @success: make the channel switch successful or not
@@ -4194,4 +4253,16 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
  */
 int ieee80211_ave_rssi(struct ieee80211_vif *vif);
 
+/**
+ * ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
+ * @vif: virtual interface
+ * @wakeup: wakeup reason(s)
+ * @gfp: allocation flags
+ *
+ * See cfg80211_report_wowlan_wakeup().
+ */
+void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
+                                   struct cfg80211_wowlan_wakeup *wakeup,
+                                   gfp_t gfp);
+
 #endif /* MAC80211_H */
diff --git a/include/net/mrp.h b/include/net/mrp.h
new file mode 100644 (file)
index 0000000..4fbf02a
--- /dev/null
@@ -0,0 +1,143 @@
+#ifndef _NET_MRP_H
+#define _NET_MRP_H
+
+#define MRP_END_MARK           0x0
+
+struct mrp_pdu_hdr {
+       u8      version;
+};
+
+struct mrp_msg_hdr {
+       u8      attrtype;
+       u8      attrlen;
+};
+
+struct mrp_vecattr_hdr {
+       __be16  lenflags;
+       unsigned char   firstattrvalue[];
+#define MRP_VECATTR_HDR_LEN_MASK cpu_to_be16(0x1FFF)
+#define MRP_VECATTR_HDR_FLAG_LA cpu_to_be16(0x2000)
+};
+
+enum mrp_vecattr_event {
+       MRP_VECATTR_EVENT_NEW,
+       MRP_VECATTR_EVENT_JOIN_IN,
+       MRP_VECATTR_EVENT_IN,
+       MRP_VECATTR_EVENT_JOIN_MT,
+       MRP_VECATTR_EVENT_MT,
+       MRP_VECATTR_EVENT_LV,
+       __MRP_VECATTR_EVENT_MAX
+};
+
+struct mrp_skb_cb {
+       struct mrp_msg_hdr      *mh;
+       struct mrp_vecattr_hdr  *vah;
+       unsigned char           attrvalue[];
+};
+
+static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
+                    FIELD_SIZEOF(struct sk_buff, cb));
+       return (struct mrp_skb_cb *)skb->cb;
+}
+
+enum mrp_applicant_state {
+       MRP_APPLICANT_INVALID,
+       MRP_APPLICANT_VO,
+       MRP_APPLICANT_VP,
+       MRP_APPLICANT_VN,
+       MRP_APPLICANT_AN,
+       MRP_APPLICANT_AA,
+       MRP_APPLICANT_QA,
+       MRP_APPLICANT_LA,
+       MRP_APPLICANT_AO,
+       MRP_APPLICANT_QO,
+       MRP_APPLICANT_AP,
+       MRP_APPLICANT_QP,
+       __MRP_APPLICANT_MAX
+};
+#define MRP_APPLICANT_MAX      (__MRP_APPLICANT_MAX - 1)
+
+enum mrp_event {
+       MRP_EVENT_NEW,
+       MRP_EVENT_JOIN,
+       MRP_EVENT_LV,
+       MRP_EVENT_TX,
+       MRP_EVENT_R_NEW,
+       MRP_EVENT_R_JOIN_IN,
+       MRP_EVENT_R_IN,
+       MRP_EVENT_R_JOIN_MT,
+       MRP_EVENT_R_MT,
+       MRP_EVENT_R_LV,
+       MRP_EVENT_R_LA,
+       MRP_EVENT_REDECLARE,
+       MRP_EVENT_PERIODIC,
+       __MRP_EVENT_MAX
+};
+#define MRP_EVENT_MAX          (__MRP_EVENT_MAX - 1)
+
+enum mrp_tx_action {
+       MRP_TX_ACTION_NONE,
+       MRP_TX_ACTION_S_NEW,
+       MRP_TX_ACTION_S_JOIN_IN,
+       MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+       MRP_TX_ACTION_S_IN_OPTIONAL,
+       MRP_TX_ACTION_S_LV,
+};
+
+struct mrp_attr {
+       struct rb_node                  node;
+       enum mrp_applicant_state        state;
+       u8                              type;
+       u8                              len;
+       unsigned char                   value[];
+};
+
+enum mrp_applications {
+       MRP_APPLICATION_MVRP,
+       __MRP_APPLICATION_MAX
+};
+#define MRP_APPLICATION_MAX    (__MRP_APPLICATION_MAX - 1)
+
+struct mrp_application {
+       enum mrp_applications   type;
+       unsigned int            maxattr;
+       struct packet_type      pkttype;
+       unsigned char           group_address[ETH_ALEN];
+       u8                      version;
+};
+
+struct mrp_applicant {
+       struct mrp_application  *app;
+       struct net_device       *dev;
+       struct timer_list       join_timer;
+
+       spinlock_t              lock;
+       struct sk_buff_head     queue;
+       struct sk_buff          *pdu;
+       struct rb_root          mad;
+       struct rcu_head         rcu;
+};
+
+struct mrp_port {
+       struct mrp_applicant __rcu      *applicants[MRP_APPLICATION_MAX + 1];
+       struct rcu_head                 rcu;
+};
+
+extern int     mrp_register_application(struct mrp_application *app);
+extern void    mrp_unregister_application(struct mrp_application *app);
+
+extern int     mrp_init_applicant(struct net_device *dev,
+                                   struct mrp_application *app);
+extern void    mrp_uninit_applicant(struct net_device *dev,
+                                     struct mrp_application *app);
+
+extern int     mrp_request_join(const struct net_device *dev,
+                                 const struct mrp_application *app,
+                                 const void *value, u8 len, u8 type);
+extern void    mrp_request_leave(const struct net_device *dev,
+                                  const struct mrp_application *app,
+                                  const void *value, u8 len, u8 type);
+
+#endif /* _NET_MRP_H */
index 629ee57..7e748ad 100644 (file)
@@ -181,6 +181,7 @@ struct neigh_table {
 };
 
 #define NEIGH_PRIV_ALIGN       sizeof(long long)
+#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
 
 static inline void *neighbour_priv(const struct neighbour *n)
 {
index 9b78862..2ba9de8 100644 (file)
@@ -22,6 +22,7 @@ struct netns_ipv4 {
        struct ctl_table_header *frags_hdr;
        struct ctl_table_header *ipv4_hdr;
        struct ctl_table_header *route_hdr;
+       struct ctl_table_header *xfrm4_hdr;
 #endif
        struct ipv4_devconf     *devconf_all;
        struct ipv4_devconf     *devconf_dflt;
index 214cb0a..1242f37 100644 (file)
@@ -16,6 +16,7 @@ struct netns_sysctl_ipv6 {
        struct ctl_table_header *route_hdr;
        struct ctl_table_header *icmp_hdr;
        struct ctl_table_header *frags_hdr;
+       struct ctl_table_header *xfrm6_hdr;
 #endif
        int bindv6only;
        int flush_delay;
index 66f5ac3..388bf8b 100644 (file)
@@ -65,8 +65,14 @@ struct qdisc_watchdog {
 };
 
 extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
-                                   psched_time_t expires);
+extern void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+
+static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
+                                          psched_time_t expires)
+{
+       qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+}
+
 extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
 
 extern struct Qdisc_ops pfifo_qdisc_ops;
index 2d06c2a..2761c90 100644 (file)
@@ -679,4 +679,23 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 }
 #endif
 
+struct psched_ratecfg {
+       u64 rate_bps;
+       u32 mult;
+       u32 shift;
+};
+
+static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
+                               unsigned int len)
+{
+       return ((u64)len * r->mult) >> r->shift;
+}
+
+extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);
+
+static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r)
+{
+       return r->rate_bps >> 3;
+}
+
 #endif
index c29707d..a7dd5c5 100644 (file)
@@ -303,7 +303,7 @@ enum { SCTP_MAX_GABS = 16 };
                                          * to which we will raise the P-MTU.
                                         */
 #define SCTP_DEFAULT_MINSEGMENT 512    /* MTU size ... if no mtu disc */
-#define SCTP_HOW_MANY_SECRETS 2                /* How many secrets I keep */
+
 #define SCTP_SECRET_SIZE 32            /* Number of octets in a 256 bits. */
 
 #define SCTP_SIGNATURE_SIZE 20         /* size of a SLA-1 signature */
index fdeb85a..0e0f9d2 100644 (file)
@@ -1236,10 +1236,7 @@ struct sctp_endpoint {
         *            Discussion in [RFC1750] can be helpful in
         *            selection of the key.
         */
-       __u8 secret_key[SCTP_HOW_MANY_SECRETS][SCTP_SECRET_SIZE];
-       int current_key;
-       int last_key;
-       int key_changed_at;
+       __u8 secret_key[SCTP_SECRET_SIZE];
 
        /* digest:  This is a digest of the sctp cookie.  This field is
         *          only used on the receive path when we try to validate
index a340ab4..a66caa2 100644 (file)
@@ -1041,7 +1041,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
               sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
 }
 
-inline void sk_refcnt_debug_release(const struct sock *sk)
+static inline void sk_refcnt_debug_release(const struct sock *sk)
 {
        if (atomic_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
index 614af8b..23f2e98 100644 (file)
@@ -279,7 +279,6 @@ extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_abc;
 extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
index 498433d..938b7fd 100644 (file)
@@ -34,17 +34,17 @@ extern int                          udpv6_connect(struct sock *sk,
                                                      struct sockaddr *uaddr,
                                                      int addr_len);
 
-extern int                     datagram_recv_ctl(struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct sk_buff *skb);
-
-extern int                     datagram_send_ctl(struct net *net,
-                                                 struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct flowi6 *fl6,
-                                                 struct ipv6_txoptions *opt,
-                                                 int *hlimit, int *tclass,
-                                                 int *dontfrag);
+extern int                     ip6_datagram_recv_ctl(struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct sk_buff *skb);
+
+extern int                     ip6_datagram_send_ctl(struct net *net,
+                                                     struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct flowi6 *fl6,
+                                                     struct ipv6_txoptions *opt,
+                                                     int *hlimit, int *tclass,
+                                                     int *dontfrag);
 
 #define                LOOPBACK4_IPV6          cpu_to_be32(0x7f000006)
 
index de34883..24c8886 100644 (file)
@@ -501,6 +501,12 @@ struct xfrm_policy_walk {
        u32 seq;
 };
 
+struct xfrm_policy_queue {
+       struct sk_buff_head     hold_queue;
+       struct timer_list       hold_timer;
+       unsigned long           timeout;
+};
+
 struct xfrm_policy {
 #ifdef CONFIG_NET_NS
        struct net              *xp_net;
@@ -522,6 +528,7 @@ struct xfrm_policy {
        struct xfrm_lifetime_cfg lft;
        struct xfrm_lifetime_cur curlft;
        struct xfrm_policy_walk_entry walk;
+       struct xfrm_policy_queue polq;
        u8                      type;
        u8                      action;
        u8                      flags;
@@ -1320,6 +1327,7 @@ struct xfrm_algo_desc {
        char *name;
        char *compat;
        u8 available:1;
+       u8 pfkey_supported:1;
        union {
                struct xfrm_algo_aead_info aead;
                struct xfrm_algo_auth_info auth;
@@ -1561,8 +1569,8 @@ extern void xfrm_input_init(void);
 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
 
 extern void xfrm_probe_algs(void);
-extern int xfrm_count_auth_supported(void);
-extern int xfrm_count_enc_supported(void);
+extern int xfrm_count_pfkey_auth_supported(void);
+extern int xfrm_count_pfkey_enc_supported(void);
 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
index 77cdba9..bb991df 100644 (file)
 #define AUTOFS_MIN_PROTO_VERSION       AUTOFS_PROTO_VERSION
 
 /*
- * Architectures where both 32- and 64-bit binaries can be executed
- * on 64-bit kernels need this.  This keeps the structure format
- * uniform, and makes sure the wait_queue_token isn't too big to be
- * passed back down to the kernel.
- *
- * This assumes that on these architectures:
- * mode     32 bit    64 bit
- * -------------------------
- * int      32 bit    32 bit
- * long     32 bit    64 bit
- *
- * If so, 32-bit user-space code should be backwards compatible.
+ * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
+ * back to the kernel via ioctl from userspace. On architectures where 32- and
+ * 64-bit userspace binaries can be executed it's important that the size of
+ * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
+ * do not break the binary ABI interface by changing the structure size.
  */
-
-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
- || defined(__powerpc__) || defined(__s390__)
-typedef unsigned int autofs_wqt_t;
-#else
+#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
 typedef unsigned long autofs_wqt_t;
+#else
+typedef unsigned int autofs_wqt_t;
 #endif
 
 /* Packet types */
index 5db2975..2d70d79 100644 (file)
@@ -108,15 +108,26 @@ struct __fdb_entry {
  * [IFLA_AF_SPEC] = {
  *     [IFLA_BRIDGE_FLAGS]
  *     [IFLA_BRIDGE_MODE]
+ *     [IFLA_BRIDGE_VLAN_INFO]
  * }
  */
 enum {
        IFLA_BRIDGE_FLAGS,
        IFLA_BRIDGE_MODE,
+       IFLA_BRIDGE_VLAN_INFO,
        __IFLA_BRIDGE_MAX,
 };
 #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
 
+#define BRIDGE_VLAN_INFO_MASTER        (1<<0)  /* Operate on Bridge device as well */
+#define BRIDGE_VLAN_INFO_PVID  (1<<1)  /* VLAN is PVID, ingress untagged */
+#define BRIDGE_VLAN_INFO_UNTAGGED      (1<<2)  /* VLAN egresses untagged */
+
+struct bridge_vlan_info {
+       __u16 flags;
+       __u16 vid;
+};
+
 /* Bridge multicast database attributes
  * [MDBA_MDB] = {
  *     [MDBA_MDB_ENTRY] = {
index 67fb87c..798032d 100644 (file)
@@ -83,6 +83,7 @@
 #define ETH_P_802_EX1  0x88B5          /* 802.1 Local Experimental 1.  */
 #define ETH_P_TIPC     0x88CA          /* TIPC                         */
 #define ETH_P_8021AH   0x88E7          /* 802.1ah Backbone Service Tag */
+#define ETH_P_MVRP     0x88F5          /* 802.1Q MVRP                  */
 #define ETH_P_1588     0x88F7          /* IEEE 1588 Timesync */
 #define ETH_P_FCOE     0x8906          /* Fibre Channel over Ethernet  */
 #define ETH_P_TDLS     0x890D          /* TDLS */
index 0744f8e..7e5e6b3 100644 (file)
@@ -34,6 +34,7 @@ enum vlan_flags {
        VLAN_FLAG_REORDER_HDR   = 0x1,
        VLAN_FLAG_GVRP          = 0x2,
        VLAN_FLAG_LOOSE_BINDING = 0x4,
+       VLAN_FLAG_MVRP          = 0x8,
 };
 
 enum vlan_name_types {
index 275e5d6..adb068c 100644 (file)
@@ -20,6 +20,7 @@ enum {
        NDA_LLADDR,
        NDA_CACHEINFO,
        NDA_PROBES,
+       NDA_VLAN,
        __NDA_MAX
 };
 
index e6eeb4b..c46bb01 100644 (file)
  *     %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHERS_PAIRWISE,
  *     %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS,
  *     %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY,
- *     %NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT.
+ *     %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_INACTIVITY_TIMEOUT,
+ *     %NL80211_ATTR_ACL_POLICY and %NL80211_ATTR_MAC_ADDRS.
  *     The channel to use can be set on the interface or be given using the
  *     %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel width.
  * @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP
  *     command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For
  *     more background information, see
  *     http://wireless.kernel.org/en/users/Documentation/WoWLAN.
+ *     The @NL80211_CMD_SET_WOWLAN command can also be used as a notification
+ *     from the driver reporting the wakeup reason. In this case, the
+ *     @NL80211_ATTR_WOWLAN_TRIGGERS attribute will contain the reason
+ *     for the wakeup, if it was caused by wireless. If it is not present
+ *     in the wakeup notification, the wireless device didn't cause the
+ *     wakeup but reports that it was woken up.
  *
  * @NL80211_CMD_SET_REKEY_OFFLOAD: This command is used give the driver
  *     the necessary information for supporting GTK rekey offload. This
  * @NL80211_CMD_SET_MCAST_RATE: Change the rate used to send multicast frames
  *     for IBSS or MESH vif.
  *
+ * @NL80211_CMD_SET_MAC_ACL: sets ACL for MAC address based access control.
+ *     This is to be used with the drivers advertising the support of MAC
+ *     address based access control. List of MAC addresses is passed in
+ *     %NL80211_ATTR_MAC_ADDRS and ACL policy is passed in
+ *     %NL80211_ATTR_ACL_POLICY. Driver will enable ACL with this list, if it
+ *     is not already done. The new list will replace any existing list. Driver
+ *     will clear its ACL when the list of MAC addresses passed is empty. This
+ *     command is used in AP/P2P GO mode. Driver has to make sure to clear its
+ *     ACL list during %NL80211_CMD_STOP_AP.
+ *
+ * @NL80211_CMD_RADAR_DETECT: Start a Channel availability check (CAC). Once
+ *     a radar is detected or the channel availability scan (CAC) has finished
+ *     or was aborted, or a radar was detected, usermode will be notified with
+ *     this event. This command is also used to notify userspace about radars
+ *     while operating on this channel.
+ *     %NL80211_ATTR_RADAR_EVENT is used to inform about the type of the
+ *     event.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -736,6 +761,10 @@ enum nl80211_commands {
 
        NL80211_CMD_SET_MCAST_RATE,
 
+       NL80211_CMD_SET_MAC_ACL,
+
+       NL80211_CMD_RADAR_DETECT,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1313,6 +1342,32 @@ enum nl80211_commands {
  * @NL80211_ATTR_LOCAL_MESH_POWER_MODE: local mesh STA link-specific power mode
  *     defined in &enum nl80211_mesh_power_mode.
  *
+ * @NL80211_ATTR_ACL_POLICY: ACL policy, see &enum nl80211_acl_policy,
+ *     carried in a u32 attribute
+ *
+ * @NL80211_ATTR_MAC_ADDRS: Array of nested MAC addresses, used for
+ *     MAC ACL.
+ *
+ * @NL80211_ATTR_MAC_ACL_MAX: u32 attribute to advertise the maximum
+ *     number of MAC addresses that a device can support for MAC
+ *     ACL.
+ *
+ * @NL80211_ATTR_RADAR_EVENT: Type of radar event for notification to userspace,
+ *     contains a value of enum nl80211_radar_event (u32).
+ *
+ * @NL80211_ATTR_EXT_CAPA: 802.11 extended capabilities that the kernel driver
+ *     has and handles. The format is the same as the IE contents. See
+ *     802.11-2012 8.4.2.29 for more information.
+ * @NL80211_ATTR_EXT_CAPA_MASK: Extended capabilities that the kernel driver
+ *     has set in the %NL80211_ATTR_EXT_CAPA value, for multibit fields.
+ *
+ * @NL80211_ATTR_STA_CAPABILITY: Station capabilities (u16) are advertised to
+ *     the driver, e.g., to enable TDLS power save (PU-APSD).
+ *
+ * @NL80211_ATTR_STA_EXT_CAPABILITY: Station extended capabilities are
+ *     advertised to the driver, e.g., to enable TDLS off channel operations
+ *     and PU-APSD.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1585,6 +1640,20 @@ enum nl80211_attrs {
 
        NL80211_ATTR_LOCAL_MESH_POWER_MODE,
 
+       NL80211_ATTR_ACL_POLICY,
+
+       NL80211_ATTR_MAC_ADDRS,
+
+       NL80211_ATTR_MAC_ACL_MAX,
+
+       NL80211_ATTR_RADAR_EVENT,
+
+       NL80211_ATTR_EXT_CAPA,
+       NL80211_ATTR_EXT_CAPA_MASK,
+
+       NL80211_ATTR_STA_CAPABILITY,
+       NL80211_ATTR_STA_EXT_CAPABILITY,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1822,6 +1891,8 @@ enum nl80211_sta_bss_param {
  * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
  * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
  * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
+ * @NL80211_STA_INFO_RX_BYTES64: total received bytes (u64, from this station)
+ * @NL80211_STA_INFO_TX_BYTES64: total transmitted bytes (u64, to this station)
  * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
  * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
  *     containing info as possible, see &enum nl80211_rate_info
@@ -1874,6 +1945,8 @@ enum nl80211_sta_info {
        NL80211_STA_INFO_LOCAL_PM,
        NL80211_STA_INFO_PEER_PM,
        NL80211_STA_INFO_NONPEER_PM,
+       NL80211_STA_INFO_RX_BYTES64,
+       NL80211_STA_INFO_TX_BYTES64,
 
        /* keep last */
        __NL80211_STA_INFO_AFTER_LAST,
@@ -1983,6 +2056,20 @@ enum nl80211_band_attr {
  *     on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm
  *     (100 * dBm).
+ * @NL80211_FREQUENCY_ATTR_DFS_STATE: current state for DFS
+ *     (enum nl80211_dfs_state)
+ * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in miliseconds for how long
+ *     this channel is in this DFS state.
+ * @NL80211_FREQUENCY_ATTR_NO_HT40_MINUS: HT40- isn't possible with this
+ *     channel as the control channel
+ * @NL80211_FREQUENCY_ATTR_NO_HT40_PLUS: HT40+ isn't possible with this
+ *     channel as the control channel
+ * @NL80211_FREQUENCY_ATTR_NO_80MHZ: any 80 MHz channel using this channel
+ *     as the primary or any of the secondary channels isn't possible,
+ *     this includes 80+80 channels
+ * @NL80211_FREQUENCY_ATTR_NO_160MHZ: any 160 MHz (but not 80+80) channel
+ *     using this channel as the primary or any of the secondary channels
+ *     isn't possible
  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
  *     currently defined
  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -1995,6 +2082,12 @@ enum nl80211_frequency_attr {
        NL80211_FREQUENCY_ATTR_NO_IBSS,
        NL80211_FREQUENCY_ATTR_RADAR,
        NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+       NL80211_FREQUENCY_ATTR_DFS_STATE,
+       NL80211_FREQUENCY_ATTR_DFS_TIME,
+       NL80211_FREQUENCY_ATTR_NO_HT40_MINUS,
+       NL80211_FREQUENCY_ATTR_NO_HT40_PLUS,
+       NL80211_FREQUENCY_ATTR_NO_80MHZ,
+       NL80211_FREQUENCY_ATTR_NO_160MHZ,
 
        /* keep last */
        __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -2867,10 +2960,12 @@ enum nl80211_tx_power_setting {
  *     corresponds to the lowest-order bit in the second byte of the mask.
  *     For example: The match 00:xx:00:00:xx:00:00:00:00:xx:xx:xx (where
  *     xx indicates "don't care") would be represented by a pattern of
- *     twelve zero bytes, and a mask of "0xed,0x07".
+ *     twelve zero bytes, and a mask of "0xed,0x01".
  *     Note that the pattern matching is done as though frames were not
  *     802.11 frames but 802.3 frames, i.e. the frame is fully unpacked
  *     first (including SNAP header unpacking) and then matched.
+ * @NL80211_WOWLAN_PKTPAT_OFFSET: packet offset, pattern is matched after
+ *     these fixed number of bytes of received packet
  * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes
  * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number
  */
@@ -2878,6 +2973,7 @@ enum nl80211_wowlan_packet_pattern_attr {
        __NL80211_WOWLAN_PKTPAT_INVALID,
        NL80211_WOWLAN_PKTPAT_MASK,
        NL80211_WOWLAN_PKTPAT_PATTERN,
+       NL80211_WOWLAN_PKTPAT_OFFSET,
 
        NUM_NL80211_WOWLAN_PKTPAT,
        MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1,
@@ -2888,6 +2984,7 @@ enum nl80211_wowlan_packet_pattern_attr {
  * @max_patterns: maximum number of patterns supported
  * @min_pattern_len: minimum length of each pattern
  * @max_pattern_len: maximum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
  *
  * This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when
  * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the
@@ -2897,6 +2994,7 @@ struct nl80211_wowlan_pattern_support {
        __u32 max_patterns;
        __u32 min_pattern_len;
        __u32 max_pattern_len;
+       __u32 max_pkt_offset;
 } __attribute__((packed));
 
 /**
@@ -2912,12 +3010,17 @@ struct nl80211_wowlan_pattern_support {
  * @NL80211_WOWLAN_TRIG_PKT_PATTERN: wake up on the specified packet patterns
  *     which are passed in an array of nested attributes, each nested attribute
  *     defining a with attributes from &struct nl80211_wowlan_trig_pkt_pattern.
- *     Each pattern defines a wakeup packet. The matching is done on the MSDU,
- *     i.e. as though the packet was an 802.3 packet, so the pattern matching
- *     is done after the packet is converted to the MSDU.
+ *     Each pattern defines a wakeup packet. Packet offset is associated with
+ *     each pattern which is used while matching the pattern. The matching is
+ *     done on the MSDU, i.e. as though the packet was an 802.3 packet, so the
+ *     pattern matching is done after the packet is converted to the MSDU.
  *
  *     In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute
  *     carrying a &struct nl80211_wowlan_pattern_support.
+ *
+ *     When reporting wakeup. it is a u32 attribute containing the 0-based
+ *     index of the pattern that caused the wakeup, in the patterns passed
+ *     to the kernel when configuring.
  * @NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED: Not a real trigger, and cannot be
  *     used when setting, used only to indicate that GTK rekeying is supported
  *     by the device (flag)
@@ -2928,8 +3031,36 @@ struct nl80211_wowlan_pattern_support {
  * @NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE: wake up on 4-way handshake (flag)
  * @NL80211_WOWLAN_TRIG_RFKILL_RELEASE: wake up when rfkill is released
  *     (on devices that have rfkill in the device) (flag)
+ * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211: For wakeup reporting only, contains
+ *     the 802.11 packet that caused the wakeup, e.g. a deauth frame. The frame
+ *     may be truncated, the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN
+ *     attribute contains the original length.
+ * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN: Original length of the 802.11
+ *     packet, may be bigger than the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211
+ *     attribute if the packet was truncated somewhere.
+ * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023: For wakeup reporting only, contains the
+ *     802.11 packet that caused the wakeup, e.g. a magic packet. The frame may
+ *     be truncated, the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN attribute
+ *     contains the original length.
+ * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN: Original length of the 802.3
+ *     packet, may be bigger than the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023
+ *     attribute if the packet was truncated somewhere.
+ * @NL80211_WOWLAN_TRIG_TCP_CONNECTION: TCP connection wake, see DOC section
+ *     "TCP connection wakeup" for more details. This is a nested attribute
+ *     containing the exact information for establishing and keeping alive
+ *     the TCP connection.
+ * @NL80211_WOWLAN_TRIG_TCP_WAKEUP_MATCH: For wakeup reporting only, the
+ *     wakeup packet was received on the TCP connection
+ * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST: For wakeup reporting only, the
+ *     TCP connection was lost or failed to be established
+ * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only,
+ *     the TCP connection ran out of tokens to use for data to send to the
+ *     service
  * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
  * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
+ *
+ * These nested attributes are used to configure the wakeup triggers and
+ * to report the wakeup reason(s).
  */
 enum nl80211_wowlan_triggers {
        __NL80211_WOWLAN_TRIG_INVALID,
@@ -2942,6 +3073,14 @@ enum nl80211_wowlan_triggers {
        NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST,
        NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE,
        NL80211_WOWLAN_TRIG_RFKILL_RELEASE,
+       NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211,
+       NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN,
+       NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023,
+       NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN,
+       NL80211_WOWLAN_TRIG_TCP_CONNECTION,
+       NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH,
+       NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST,
+       NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS,
 
        /* keep last */
        NUM_NL80211_WOWLAN_TRIG,
@@ -2949,6 +3088,116 @@ enum nl80211_wowlan_triggers {
 };
 
 /**
+ * DOC: TCP connection wakeup
+ *
+ * Some devices can establish a TCP connection in order to be woken up by a
+ * packet coming in from outside their network segment, or behind NAT. If
+ * configured, the device will establish a TCP connection to the given
+ * service, and periodically send data to that service. The first data
+ * packet is usually transmitted after SYN/ACK, also ACKing the SYN/ACK.
+ * The data packets can optionally include a (little endian) sequence
+ * number (in the TCP payload!) that is generated by the device, and, also
+ * optionally, a token from a list of tokens. This serves as a keep-alive
+ * with the service, and for NATed connections, etc.
+ *
+ * During this keep-alive period, the server doesn't send any data to the
+ * client. When receiving data, it is compared against the wakeup pattern
+ * (and mask) and if it matches, the host is woken up. Similarly, if the
+ * connection breaks or cannot be established to start with, the host is
+ * also woken up.
+ *
+ * Developer's note: ARP offload is required for this, otherwise TCP
+ * response packets might not go through correctly.
+ */
+
+/**
+ * struct nl80211_wowlan_tcp_data_seq - WoWLAN TCP data sequence
+ * @start: starting value
+ * @offset: offset of sequence number in packet
+ * @len: length of the sequence value to write, 1 through 4
+ *
+ * Note: don't confuse with the TCP sequence number(s), this is for the
+ * keepalive packet payload. The actual value is written into the packet
+ * in little endian.
+ */
+struct nl80211_wowlan_tcp_data_seq {
+       __u32 start, offset, len;
+};
+
+/**
+ * struct nl80211_wowlan_tcp_data_token - WoWLAN TCP data token config
+ * @offset: offset of token in packet
+ * @len: length of each token
+ * @token_stream: stream of data to be used for the tokens, the length must
+ *     be a multiple of @len for this to make sense
+ */
+struct nl80211_wowlan_tcp_data_token {
+       __u32 offset, len;
+       __u8 token_stream[];
+};
+
+/**
+ * struct nl80211_wowlan_tcp_data_token_feature - data token features
+ * @min_len: minimum token length
+ * @max_len: maximum token length
+ * @bufsize: total available token buffer size (max size of @token_stream)
+ */
+struct nl80211_wowlan_tcp_data_token_feature {
+       __u32 min_len, max_len, bufsize;
+};
+
+/**
+ * enum nl80211_wowlan_tcp_attrs - WoWLAN TCP connection parameters
+ * @__NL80211_WOWLAN_TCP_INVALID: invalid number for nested attributes
+ * @NL80211_WOWLAN_TCP_SRC_IPV4: source IPv4 address (in network byte order)
+ * @NL80211_WOWLAN_TCP_DST_IPV4: destination IPv4 address
+ *     (in network byte order)
+ * @NL80211_WOWLAN_TCP_DST_MAC: destination MAC address, this is given because
+ *     route lookup when configured might be invalid by the time we suspend,
+ *     and doing a route lookup when suspending is no longer possible as it
+ *     might require ARP querying.
+ * @NL80211_WOWLAN_TCP_SRC_PORT: source port (u16); optional, if not given a
+ *     socket and port will be allocated
+ * @NL80211_WOWLAN_TCP_DST_PORT: destination port (u16)
+ * @NL80211_WOWLAN_TCP_DATA_PAYLOAD: data packet payload, at least one byte.
+ *     For feature advertising, a u32 attribute holding the maximum length
+ *     of the data payload.
+ * @NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ: data packet sequence configuration
+ *     (if desired), a &struct nl80211_wowlan_tcp_data_seq. For feature
+ *     advertising it is just a flag
+ * @NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN: data packet token configuration,
+ *     see &struct nl80211_wowlan_tcp_data_token and for advertising see
+ *     &struct nl80211_wowlan_tcp_data_token_feature.
+ * @NL80211_WOWLAN_TCP_DATA_INTERVAL: data interval in seconds, maximum
+ *     interval in feature advertising (u32)
+ * @NL80211_WOWLAN_TCP_WAKE_PAYLOAD: wake packet payload, for advertising a
+ *     u32 attribute holding the maximum length
+ * @NL80211_WOWLAN_TCP_WAKE_MASK: Wake packet payload mask, not used for
+ *     feature advertising. The mask works like @NL80211_WOWLAN_PKTPAT_MASK
+ *     but on the TCP payload only.
+ * @NUM_NL80211_WOWLAN_TCP: number of TCP attributes
+ * @MAX_NL80211_WOWLAN_TCP: highest attribute number
+ */
+enum nl80211_wowlan_tcp_attrs {
+       __NL80211_WOWLAN_TCP_INVALID,
+       NL80211_WOWLAN_TCP_SRC_IPV4,
+       NL80211_WOWLAN_TCP_DST_IPV4,
+       NL80211_WOWLAN_TCP_DST_MAC,
+       NL80211_WOWLAN_TCP_SRC_PORT,
+       NL80211_WOWLAN_TCP_DST_PORT,
+       NL80211_WOWLAN_TCP_DATA_PAYLOAD,
+       NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ,
+       NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
+       NL80211_WOWLAN_TCP_DATA_INTERVAL,
+       NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
+       NL80211_WOWLAN_TCP_WAKE_MASK,
+
+       /* keep last */
+       NUM_NL80211_WOWLAN_TCP,
+       MAX_NL80211_WOWLAN_TCP = NUM_NL80211_WOWLAN_TCP - 1
+};
+
+/**
  * enum nl80211_iface_limit_attrs - limit attributes
  * @NL80211_IFACE_LIMIT_UNSPEC: (reserved)
  * @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that
@@ -3205,6 +3454,8 @@ enum nl80211_ap_sme_features {
  *     Note that even for drivers that support this, the default is to add
  *     stations in authenticated/associated state, so to add unauthenticated
  *     stations the authenticated/associated bits have to be set in the mask.
+ * @NL80211_FEATURE_ADVERTISE_CHAN_LIMITS: cfg80211 advertises channel limits
+ *     (HT40, VHT 80/160 MHz) if this flag is set
  */
 enum nl80211_feature_flags {
        NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
@@ -3220,7 +3471,9 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_NEED_OBSS_SCAN                  = 1 << 10,
        NL80211_FEATURE_P2P_GO_CTWIN                    = 1 << 11,
        NL80211_FEATURE_P2P_GO_OPPPS                    = 1 << 12,
-       NL80211_FEATURE_FULL_AP_CLIENT_STATE            = 1 << 13,
+       /* bit 13 is reserved */
+       NL80211_FEATURE_ADVERTISE_CHAN_LIMITS           = 1 << 14,
+       NL80211_FEATURE_FULL_AP_CLIENT_STATE            = 1 << 15,
 };
 
 /**
@@ -3248,7 +3501,7 @@ enum nl80211_probe_resp_offload_support_attr {
  * enum nl80211_connect_failed_reason - connection request failed reasons
  * @NL80211_CONN_FAIL_MAX_CLIENTS: Maximum number of clients that can be
  *     handled by the AP is reached.
- * @NL80211_CONN_FAIL_BLOCKED_CLIENT: Client's MAC is in the AP's blocklist.
+ * @NL80211_CONN_FAIL_BLOCKED_CLIENT: Connection request is rejected due to ACL.
  */
 enum nl80211_connect_failed_reason {
        NL80211_CONN_FAIL_MAX_CLIENTS,
@@ -3276,4 +3529,62 @@ enum nl80211_scan_flags {
        NL80211_SCAN_FLAG_AP                            = 1<<2,
 };
 
+/**
+ * enum nl80211_acl_policy - access control policy
+ *
+ * Access control policy is applied on a MAC list set by
+ * %NL80211_CMD_START_AP and %NL80211_CMD_SET_MAC_ACL, to
+ * be used with %NL80211_ATTR_ACL_POLICY.
+ *
+ * @NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED: Deny stations which are
+ *     listed in ACL, i.e. allow all the stations which are not listed
+ *     in ACL to authenticate.
+ * @NL80211_ACL_POLICY_DENY_UNLESS_LISTED: Allow the stations which are listed
+ *     in ACL, i.e. deny all the stations which are not listed in ACL.
+ */
+enum nl80211_acl_policy {
+       NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED,
+       NL80211_ACL_POLICY_DENY_UNLESS_LISTED,
+};
+
+/**
+ * enum nl80211_radar_event - type of radar event for DFS operation
+ *
+ * Type of event to be used with NL80211_ATTR_RADAR_EVENT to inform userspace
+ * about detected radars or success of the channel available check (CAC)
+ *
+ * @NL80211_RADAR_DETECTED: A radar pattern has been detected. The channel is
+ *     now unusable.
+ * @NL80211_RADAR_CAC_FINISHED: Channel Availability Check has been finished,
+ *     the channel is now available.
+ * @NL80211_RADAR_CAC_ABORTED: Channel Availability Check has been aborted, no
+ *     change to the channel status.
+ * @NL80211_RADAR_NOP_FINISHED: The Non-Occupancy Period for this channel is
+ *     over, channel becomes usable.
+ */
+enum nl80211_radar_event {
+       NL80211_RADAR_DETECTED,
+       NL80211_RADAR_CAC_FINISHED,
+       NL80211_RADAR_CAC_ABORTED,
+       NL80211_RADAR_NOP_FINISHED,
+};
+
+/**
+ * enum nl80211_dfs_state - DFS states for channels
+ *
+ * Channel states used by the DFS code.
+ *
+ * @IEEE80211_DFS_USABLE: The channel can be used, but channel availability
+ *     check (CAC) must be performed before using it for AP or IBSS.
+ * @IEEE80211_DFS_UNAVAILABLE: A radar has been detected on this channel, it
+ *     is therefore marked as not available.
+ * @IEEE80211_DFS_AVAILABLE: The channel has been CAC checked and is available.
+ */
+
+enum nl80211_dfs_state {
+       NL80211_DFS_USABLE,
+       NL80211_DFS_UNAVAILABLE,
+       NL80211_DFS_AVAILABLE,
+};
+
 #endif /* __LINUX_NL80211_H */
index 7a5eb19..7a2144e 100644 (file)
@@ -630,6 +630,7 @@ struct tcamsg {
 
 /* New extended info filters for IFLA_EXT_MASK */
 #define RTEXT_FILTER_VF                (1 << 0)
+#define RTEXT_FILTER_BRVLAN    (1 << 1)
 
 /* End of information exported to user level */
 
index e962faa..6b1ead0 100644 (file)
@@ -111,6 +111,7 @@ enum {
 #define TCP_QUEUE_SEQ          21
 #define TCP_REPAIR_OPTIONS     22
 #define TCP_FASTOPEN           23      /* Enable FastOpen on listeners */
+#define TCP_TIMESTAMP          24
 
 struct tcp_repair_opt {
        __u32   opt_code;
index 5059847..f738e25 100644 (file)
 #define USB_INTRF_FUNC_SUSPEND_LP      (1 << (8 + 0))
 #define USB_INTRF_FUNC_SUSPEND_RW      (1 << (8 + 1))
 
+/*
+ * Interface status, Figure 9-5 USB 3.0 spec
+ */
+#define USB_INTRF_STAT_FUNC_RW_CAP     1
+#define USB_INTRF_STAT_FUNC_RW         2
+
 #define USB_ENDPOINT_HALT              0       /* IN/OUT will STALL */
 
 /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
new file mode 100644 (file)
index 0000000..df91301
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _VM_SOCKETS_H_
+#define _VM_SOCKETS_H_
+
+#if !defined(__KERNEL__)
+#include <sys/socket.h>
+#endif
+
+/* Option name for STREAM socket buffer size.  Use as the option name in
+ * setsockopt(3) or getsockopt(3) to set or get an unsigned long long that
+ * specifies the size of the buffer underlying a vSockets STREAM socket.
+ * Value is clamped to the MIN and MAX.
+ */
+
+#define SO_VM_SOCKETS_BUFFER_SIZE 0
+
+/* Option name for STREAM socket minimum buffer size.  Use as the option name
+ * in setsockopt(3) or getsockopt(3) to set or get an unsigned long long that
+ * specifies the minimum size allowed for the buffer underlying a vSockets
+ * STREAM socket.
+ */
+
+#define SO_VM_SOCKETS_BUFFER_MIN_SIZE 1
+
+/* Option name for STREAM socket maximum buffer size.  Use as the option name
+ * in setsockopt(3) or getsockopt(3) to set or get an unsigned long long
+ * that specifies the maximum size allowed for the buffer underlying a
+ * vSockets STREAM socket.
+ */
+
+#define SO_VM_SOCKETS_BUFFER_MAX_SIZE 2
+
+/* Option name for socket peer's host-specific VM ID.  Use as the option name
+ * in getsockopt(3) to get a host-specific identifier for the peer endpoint's
+ * VM.  The identifier is a signed integer.
+ * Only available for hypervisor endpoints.
+ */
+
+#define SO_VM_SOCKETS_PEER_HOST_VM_ID 3
+
+/* Option name for determining if a socket is trusted.  Use as the option name
+ * in getsockopt(3) to determine if a socket is trusted.  The value is a
+ * signed integer.
+ */
+
+#define SO_VM_SOCKETS_TRUSTED 5
+
+/* Option name for STREAM socket connection timeout.  Use as the option name
+ * in setsockopt(3) or getsockopt(3) to set or get the connection
+ * timeout for a STREAM socket.
+ */
+
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT 6
+
+/* Option name for using non-blocking send/receive.  Use as the option name
+ * for setsockopt(3) or getsockopt(3) to set or get the non-blocking
+ * transmit/receive flag for a STREAM socket.  This flag determines whether
+ * send() and recv() can be called in non-blocking contexts for the given
+ * socket.  The value is a signed integer.
+ *
+ * This option is only relevant to kernel endpoints, where descheduling the
+ * thread of execution is not allowed, for example, while holding a spinlock.
+ * It is not to be confused with conventional non-blocking socket operations.
+ *
+ * Only available for hypervisor endpoints.
+ */
+
+#define SO_VM_SOCKETS_NONBLOCK_TXRX 7
+
+/* The vSocket equivalent of INADDR_ANY.  This works for the svm_cid field of
+ * sockaddr_vm and indicates the context ID of the current endpoint.
+ */
+
+#define VMADDR_CID_ANY -1U
+
+/* Bind to any available port.  Works for the svm_port field of
+ * sockaddr_vm.
+ */
+
+#define VMADDR_PORT_ANY -1U
+
+/* Use this as the destination CID in an address when referring to the
+ * hypervisor.  VMCI relies on it being 0, but this would be useful for other
+ * transports too.
+ */
+
+#define VMADDR_CID_HYPERVISOR 0
+
+/* This CID is specific to VMCI and can be considered reserved (even VMCI
+ * doesn't use it anymore, it's a legacy value from an older release).
+ */
+
+#define VMADDR_CID_RESERVED 1
+
+/* Use this as the destination CID in an address when referring to the host
+ * (any process other than the hypervisor).  VMCI relies on it being 2, but
+ * this would be useful for other transports too.
+ */
+
+#define VMADDR_CID_HOST 2
+
+/* Invalid vSockets version. */
+
+#define VM_SOCKETS_INVALID_VERSION -1U
+
+/* The epoch (first) component of the vSockets version.  A single byte
+ * representing the epoch component of the vSockets version.
+ */
+
+#define VM_SOCKETS_VERSION_EPOCH(_v) (((_v) & 0xFF000000) >> 24)
+
+/* The major (second) component of the vSockets version.   A single byte
+ * representing the major component of the vSockets version.  Typically
+ * changes for every major release of a product.
+ */
+
+#define VM_SOCKETS_VERSION_MAJOR(_v) (((_v) & 0x00FF0000) >> 16)
+
+/* The minor (third) component of the vSockets version.  Two bytes representing
+ * the minor component of the vSockets version.
+ */
+
+#define VM_SOCKETS_VERSION_MINOR(_v) (((_v) & 0x0000FFFF))
+
+/* Address structure for vSockets.   The address family should be set to
+ * whatever vmci_sock_get_af_value_fd() returns.  The structure members should
+ * all align on their natural boundaries without resorting to compiler packing
+ * directives.  The total size of this structure should be exactly the same as
+ * that of struct sockaddr.
+ */
+
+struct sockaddr_vm {
+       sa_family_t svm_family;
+       unsigned short svm_reserved1;
+       unsigned int svm_port;
+       unsigned int svm_cid;
+       unsigned char svm_zero[sizeof(struct sockaddr) -
+                              sizeof(sa_family_t) -
+                              sizeof(unsigned short) -
+                              sizeof(unsigned int) - sizeof(unsigned int)];
+};
+
+#define IOCTL_VM_SOCKETS_GET_LOCAL_CID         _IO(7, 0xb9)
+
+#if defined(__KERNEL__)
+int vm_sockets_get_local_cid(void);
+#endif
+
+#endif
index 92d728a..cee4b5c 100644 (file)
@@ -604,7 +604,7 @@ asmlinkage void __init start_kernel(void)
        pidmap_init();
        anon_vma_init();
 #ifdef CONFIG_X86
-       if (efi_enabled)
+       if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_enter_virtual_mode();
 #endif
        thread_info_cache_init();
@@ -632,7 +632,7 @@ asmlinkage void __init start_kernel(void)
        acpi_early_init(); /* before LAPIC and SMP init */
        sfi_init_late();
 
-       if (efi_enabled) {
+       if (efi_enabled(EFI_RUNTIME_SERVICES)) {
                efi_late_init();
                efi_free_boot_services();
        }
index 301079d..7b6646a 100644 (file)
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+       event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+                                             PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
  */
@@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        event->overflow_handler = overflow_handler;
        event->overflow_handler_context = context;
 
-       if (attr->disabled)
-               event->state = PERF_EVENT_STATE_OFF;
+       perf_event__state_init(event);
 
        pmu = NULL;
 
@@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,
 
                mutex_lock(&gctx->mutex);
                perf_remove_from_context(group_leader);
+
+               /*
+                * Removing from the context ends up with disabled
+                * event. What we want here is event in the initial
+                * startup state, ready to be add into new context.
+                */
+               perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
                        perf_remove_from_context(sibling);
+                       perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
                mutex_unlock(&gctx->mutex);
index de9af60..f2c6a68 100644 (file)
@@ -331,7 +331,7 @@ out:
        return pid;
 
 out_unlock:
-       spin_unlock(&pidmap_lock);
+       spin_unlock_irq(&pidmap_lock);
 out_free:
        while (++i <= ns->level)
                free_pidmap(pid->numbers + i);
index 357f714..267ce78 100644 (file)
@@ -87,12 +87,6 @@ static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map console_lock_dep_map = {
-       .name = "console_lock"
-};
-#endif
-
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1918,6 @@ void console_lock(void)
                return;
        console_locked = 1;
        console_may_schedule = 1;
-       mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1946,7 +1939,6 @@ int console_trylock(void)
        }
        console_locked = 1;
        console_may_schedule = 0;
-       mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
        return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2107,7 +2099,6 @@ skip:
                local_irq_restore(flags);
        }
        console_locked = 0;
-       mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
        /* Release the exclusive_console once it is used */
        if (unlikely(exclusive_console))
index f6e5ec2..c1cc7e1 100644 (file)
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;            /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;         /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+       rcu_nocb_poll = 1;
+       return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
        for (;;) {
                /* If not polling, wait for next batch of callbacks. */
                if (!rcu_nocb_poll)
-                       wait_event(rdp->nocb_wq, rdp->nocb_head);
+                       wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
                list = ACCESS_ONCE(rdp->nocb_head);
                if (!list) {
                        schedule_timeout_interruptible(1);
+                       flush_signals(current);
                        continue;
                }
 
index 2cd3c1b..7ae4c4c 100644 (file)
@@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
-       SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-                       atomic64_read(&cfs_rq->tg->load_avg));
+       SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+                       (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
        SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
                        cfs_rq->tg_load_contrib);
        SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
index 5eea870..81fa536 100644 (file)
@@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
 
index 418feb0..4f02b28 100644 (file)
@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+       struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
        int i, weight, more = 0;
        u64 rt_period;
 
index 29dd40a..69f38bd 100644 (file)
@@ -33,6 +33,7 @@ struct call_function_data {
        struct call_single_data csd;
        atomic_t                refs;
        cpumask_var_t           cpumask;
+       cpumask_var_t           cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
+               if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+                               cpu_to_node(cpu)))
+                       return notifier_from_errno(-ENOMEM);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
+               free_cpumask_var(cfd->cpumask_ipi);
                break;
 #endif
        };
@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
                return;
        }
 
+       /*
+        * After we put an entry into the list, data->cpumask
+        * may be cleared again when another CPU sends another IPI for
+        * an SMP function call, so data->cpumask will be zero.
+        */
+       cpumask_copy(data->cpumask_ipi, data->cpumask);
        raw_spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
        smp_mb();
 
        /* Send a message to all CPUs in the map */
-       arch_send_call_function_ipi_mask(data->cpumask);
+       arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
        /* Optionally wait for the CPUs to complete */
        if (wait)
index 5a63844..b669ca1 100644 (file)
@@ -387,7 +387,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
        { CTL_INT,      NET_TCP_MODERATE_RCVBUF,                "tcp_moderate_rcvbuf" },
        { CTL_INT,      NET_TCP_TSO_WIN_DIVISOR,                "tcp_tso_win_divisor" },
        { CTL_STR,      NET_TCP_CONG_CONTROL,                   "tcp_congestion_control" },
-       { CTL_INT,      NET_TCP_ABC,                            "tcp_abc" },
        { CTL_INT,      NET_TCP_MTU_PROBING,                    "tcp_mtu_probing" },
        { CTL_INT,      NET_TCP_BASE_MSS,                       "tcp_base_mss" },
        { CTL_INT,      NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
index 8c0e629..dc2be7e 100644 (file)
@@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,
        memset(out1, 0, head);
        memcpy(out1 + head, p, l);
 
+       kfree(p);
+
        err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
        if (err)
                goto err;
index 6001ee6..b5783d8 100644 (file)
@@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;
 
+       /* Avoid dumping huge zero page */
+       if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+               return ERR_PTR(-EFAULT);
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
index 4f3ea0b..546db81 100644 (file)
@@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
+                       pte = arch_make_huge_pte(pte, vma, NULL, 0);
                        set_huge_pte_at(mm, address, ptep, pte);
                        pages++;
                }
index 09255ec..fbb60b1 100644 (file)
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
-       }
+       } else
+               s->memcg_params->is_root_cache = true;
+
        return 0;
 }
 
index c387786..2fd8b4a 100644 (file)
@@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
-       if (PageHuge(new))
+       if (PageHuge(new)) {
                pte = pte_mkhuge(pte);
+               pte = arch_make_huge_pte(pte, vma, new, 0);
+       }
 #endif
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);
index f0b9ce5..c9bd528 100644 (file)
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 static int do_mlockall(int flags)
 {
        struct vm_area_struct * vma, * prev = NULL;
-       unsigned int def_flags = 0;
 
        if (flags & MCL_FUTURE)
-               def_flags = VM_LOCKED;
-       current->mm->def_flags = def_flags;
+               current->mm->def_flags |= VM_LOCKED;
+       else
+               current->mm->def_flags &= ~VM_LOCKED;
        if (flags == MCL_FUTURE)
                goto out;
 
index 35730ee..d1e4124 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2943,7 +2943,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
index df2022f..9673d96 100644 (file)
@@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
        set_pageblock_migratetype(page, MIGRATE_CMA);
        __free_pages(page, pageblock_order);
        totalram_pages += pageblock_nr_pages;
+#ifdef CONFIG_HIGHMEM
+       if (PageHighMem(page))
+               totalhigh_pages += pageblock_nr_pages;
+#endif
 }
 #endif
 
index be33d27..80d4bf7 100644 (file)
@@ -5,3 +5,6 @@ config STP
 config GARP
        tristate
        select STP
+
+config MRP
+       tristate
index a30d6e3..37e654d 100644 (file)
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPX)     += p8022.o psnap.o p8023.o
 obj-$(CONFIG_ATALK)    += p8022.o psnap.o
 obj-$(CONFIG_STP)      += stp.o
 obj-$(CONFIG_GARP)     += garp.o
+obj-$(CONFIG_MRP)      += mrp.o
diff --git a/net/802/mrp.c b/net/802/mrp.c
new file mode 100644 (file)
index 0000000..a4cc322
--- /dev/null
@@ -0,0 +1,895 @@
+/*
+ *     IEEE 802.1Q Multiple Registration Protocol (MRP)
+ *
+ *     Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ *     Adapted from code in net/802/garp.c
+ *     Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/mrp.h>
+#include <asm/unaligned.h>
+
+static unsigned int mrp_join_time __read_mostly = 200;
+module_param(mrp_join_time, uint, 0644);
+MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
+MODULE_LICENSE("GPL");
+
+static const u8
+mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
+       [MRP_APPLICANT_VO] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_VO,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VO,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_VO,
+       },
+       [MRP_APPLICANT_VP] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_VO,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VP,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_VP,
+       },
+       [MRP_APPLICANT_VN] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_LA,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_AN,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VN,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_VN,
+       },
+       [MRP_APPLICANT_AN] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_AN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_AN,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_LA,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_AN,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_AN,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_AN,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AN,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AN,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VN,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VN,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_AN,
+       },
+       [MRP_APPLICANT_AA] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_AA,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_LA,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VP,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_AA,
+       },
+       [MRP_APPLICANT_QA] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_QA,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_LA,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AA,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VP,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_AA,
+       },
+       [MRP_APPLICANT_LA] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_AA,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_LA,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_LA,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_LA,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_LA,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_LA,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_LA,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_LA,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_LA,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_LA,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_LA,
+       },
+       [MRP_APPLICANT_AO] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_AP,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_AO,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_QO,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VO,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_AO,
+       },
+       [MRP_APPLICANT_QO] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_QP,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_QO,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_QO,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_QO,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_QO,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_QO,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AO,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VO,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VO,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_QO,
+       },
+       [MRP_APPLICANT_AP] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_AP,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_AO,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_QA,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_QP,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VP,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_AP,
+       },
+       [MRP_APPLICANT_QP] = {
+               [MRP_EVENT_NEW]         = MRP_APPLICANT_VN,
+               [MRP_EVENT_JOIN]        = MRP_APPLICANT_QP,
+               [MRP_EVENT_LV]          = MRP_APPLICANT_QO,
+               [MRP_EVENT_TX]          = MRP_APPLICANT_QP,
+               [MRP_EVENT_R_NEW]       = MRP_APPLICANT_QP,
+               [MRP_EVENT_R_JOIN_IN]   = MRP_APPLICANT_QP,
+               [MRP_EVENT_R_IN]        = MRP_APPLICANT_QP,
+               [MRP_EVENT_R_JOIN_MT]   = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_MT]        = MRP_APPLICANT_AP,
+               [MRP_EVENT_R_LV]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_R_LA]        = MRP_APPLICANT_VP,
+               [MRP_EVENT_REDECLARE]   = MRP_APPLICANT_VP,
+               [MRP_EVENT_PERIODIC]    = MRP_APPLICANT_AP,
+       },
+};
+
+static const u8
+mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
+       [MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+       [MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
+       [MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
+       [MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
+       [MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
+       [MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+       [MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
+       [MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+       [MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+       [MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
+       [MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
+};
+
+static void mrp_attrvalue_inc(void *value, u8 len)
+{
+       u8 *v = (u8 *)value;
+
+       /* Add 1 to the last byte. If it becomes zero,
+        * go to the previous byte and repeat.
+        */
+       while (len > 0 && !++v[--len])
+               ;
+}
+
+static int mrp_attr_cmp(const struct mrp_attr *attr,
+                        const void *value, u8 len, u8 type)
+{
+       if (attr->type != type)
+               return attr->type - type;
+       if (attr->len != len)
+               return attr->len - len;
+       return memcmp(attr->value, value, len);
+}
+
+static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
+                                       const void *value, u8 len, u8 type)
+{
+       struct rb_node *parent = app->mad.rb_node;
+       struct mrp_attr *attr;
+       int d;
+
+       while (parent) {
+               attr = rb_entry(parent, struct mrp_attr, node);
+               d = mrp_attr_cmp(attr, value, len, type);
+               if (d > 0)
+                       parent = parent->rb_left;
+               else if (d < 0)
+                       parent = parent->rb_right;
+               else
+                       return attr;
+       }
+       return NULL;
+}
+
+static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
+                                       const void *value, u8 len, u8 type)
+{
+       struct rb_node *parent = NULL, **p = &app->mad.rb_node;
+       struct mrp_attr *attr;
+       int d;
+
+       while (*p) {
+               parent = *p;
+               attr = rb_entry(parent, struct mrp_attr, node);
+               d = mrp_attr_cmp(attr, value, len, type);
+               if (d > 0)
+                       p = &parent->rb_left;
+               else if (d < 0)
+                       p = &parent->rb_right;
+               else {
+                       /* The attribute already exists; re-use it. */
+                       return attr;
+               }
+       }
+       attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
+       if (!attr)
+               return attr;
+       attr->state = MRP_APPLICANT_VO;
+       attr->type  = type;
+       attr->len   = len;
+       memcpy(attr->value, value, len);
+
+       rb_link_node(&attr->node, parent, p);
+       rb_insert_color(&attr->node, &app->mad);
+       return attr;
+}
+
+static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+{
+       rb_erase(&attr->node, &app->mad);
+       kfree(attr);
+}
+
+static int mrp_pdu_init(struct mrp_applicant *app)
+{
+       struct sk_buff *skb;
+       struct mrp_pdu_hdr *ph;
+
+       skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
+                       GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       skb->dev = app->dev;
+       skb->protocol = app->app->pkttype.type;
+       skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+
+       ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
+       ph->version = app->app->version;
+
+       app->pdu = skb;
+       return 0;
+}
+
+static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
+{
+       __be16 *endmark;
+
+       if (skb_tailroom(app->pdu) < sizeof(*endmark))
+               return -1;
+       endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
+       put_unaligned(MRP_END_MARK, endmark);
+       return 0;
+}
+
+static void mrp_pdu_queue(struct mrp_applicant *app)
+{
+       if (!app->pdu)
+               return;
+
+       if (mrp_cb(app->pdu)->mh)
+               mrp_pdu_append_end_mark(app);
+       mrp_pdu_append_end_mark(app);
+
+       dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
+                       app->app->group_address, app->dev->dev_addr,
+                       app->pdu->len);
+
+       skb_queue_tail(&app->queue, app->pdu);
+       app->pdu = NULL;
+}
+
+static void mrp_queue_xmit(struct mrp_applicant *app)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(&app->queue)))
+               dev_queue_xmit(skb);
+}
+
+static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
+                                 u8 attrtype, u8 attrlen)
+{
+       struct mrp_msg_hdr *mh;
+
+       if (mrp_cb(app->pdu)->mh) {
+               if (mrp_pdu_append_end_mark(app) < 0)
+                       return -1;
+               mrp_cb(app->pdu)->mh = NULL;
+               mrp_cb(app->pdu)->vah = NULL;
+       }
+
+       if (skb_tailroom(app->pdu) < sizeof(*mh))
+               return -1;
+       mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
+       mh->attrtype = attrtype;
+       mh->attrlen = attrlen;
+       mrp_cb(app->pdu)->mh = mh;
+       return 0;
+}
+
+static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
+                                     const void *firstattrvalue, u8 attrlen)
+{
+       struct mrp_vecattr_hdr *vah;
+
+       if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
+               return -1;
+       vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
+                                                 sizeof(*vah) + attrlen);
+       put_unaligned(0, &vah->lenflags);
+       memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
+       mrp_cb(app->pdu)->vah = vah;
+       memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
+       return 0;
+}
+
+static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
+                                       const struct mrp_attr *attr,
+                                       enum mrp_vecattr_event vaevent)
+{
+       u16 len, pos;
+       u8 *vaevents;
+       int err;
+again:
+       if (!app->pdu) {
+               err = mrp_pdu_init(app);
+               if (err < 0)
+                       return err;
+       }
+
+       /* If there is no Message header in the PDU, or the Message header is
+        * for a different attribute type, add an EndMark (if necessary) and a
+        * new Message header to the PDU.
+        */
+       if (!mrp_cb(app->pdu)->mh ||
+           mrp_cb(app->pdu)->mh->attrtype != attr->type ||
+           mrp_cb(app->pdu)->mh->attrlen != attr->len) {
+               if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
+                       goto queue;
+       }
+
+       /* If there is no VectorAttribute header for this Message in the PDU,
+        * or this attribute's value does not sequentially follow the previous
+        * attribute's value, add a new VectorAttribute header to the PDU.
+        */
+       if (!mrp_cb(app->pdu)->vah ||
+           memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
+               if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
+                       goto queue;
+       }
+
+       len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
+       pos = len % 3;
+
+       /* Events are packed into Vectors in the PDU, three to a byte. Add a
+        * byte to the end of the Vector if necessary.
+        */
+       if (!pos) {
+               if (skb_tailroom(app->pdu) < sizeof(u8))
+                       goto queue;
+               vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
+       } else {
+               vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
+       }
+
+       switch (pos) {
+       case 0:
+               *vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
+                                      __MRP_VECATTR_EVENT_MAX);
+               break;
+       case 1:
+               *vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
+               break;
+       case 2:
+               *vaevents += vaevent;
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       /* Increment the length of the VectorAttribute in the PDU, as well as
+        * the value of the next attribute that would continue its Vector.
+        */
+       put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
+       mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
+
+       return 0;
+
+queue:
+       mrp_pdu_queue(app);
+       goto again;
+}
+
+static void mrp_attr_event(struct mrp_applicant *app,
+                          struct mrp_attr *attr, enum mrp_event event)
+{
+       enum mrp_applicant_state state;
+
+       state = mrp_applicant_state_table[attr->state][event];
+       if (state == MRP_APPLICANT_INVALID) {
+               WARN_ON(1);
+               return;
+       }
+
+       if (event == MRP_EVENT_TX) {
+               /* When appending the attribute fails, don't update its state
+                * in order to retry at the next TX event.
+                */
+
+               switch (mrp_tx_action_table[attr->state]) {
+               case MRP_TX_ACTION_NONE:
+               case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
+               case MRP_TX_ACTION_S_IN_OPTIONAL:
+                       break;
+               case MRP_TX_ACTION_S_NEW:
+                       if (mrp_pdu_append_vecattr_event(
+                                   app, attr, MRP_VECATTR_EVENT_NEW) < 0)
+                               return;
+                       break;
+               case MRP_TX_ACTION_S_JOIN_IN:
+                       if (mrp_pdu_append_vecattr_event(
+                                   app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
+                               return;
+                       break;
+               case MRP_TX_ACTION_S_LV:
+                       if (mrp_pdu_append_vecattr_event(
+                                   app, attr, MRP_VECATTR_EVENT_LV) < 0)
+                               return;
+                       /* As a pure applicant, sending a leave message
+                        * implies that the attribute was unregistered and
+                        * can be destroyed.
+                        */
+                       mrp_attr_destroy(app, attr);
+                       return;
+               default:
+                       WARN_ON(1);
+               }
+       }
+
+       attr->state = state;
+}
+
+int mrp_request_join(const struct net_device *dev,
+                    const struct mrp_application *appl,
+                    const void *value, u8 len, u8 type)
+{
+       struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+       struct mrp_applicant *app = rtnl_dereference(
+               port->applicants[appl->type]);
+       struct mrp_attr *attr;
+
+       if (sizeof(struct mrp_skb_cb) + len >
+           FIELD_SIZEOF(struct sk_buff, cb))
+               return -ENOMEM;
+
+       spin_lock_bh(&app->lock);
+       attr = mrp_attr_create(app, value, len, type);
+       if (!attr) {
+               spin_unlock_bh(&app->lock);
+               return -ENOMEM;
+       }
+       mrp_attr_event(app, attr, MRP_EVENT_JOIN);
+       spin_unlock_bh(&app->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_request_join);
+
+void mrp_request_leave(const struct net_device *dev,
+                      const struct mrp_application *appl,
+                      const void *value, u8 len, u8 type)
+{
+       struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+       struct mrp_applicant *app = rtnl_dereference(
+               port->applicants[appl->type]);
+       struct mrp_attr *attr;
+
+       if (sizeof(struct mrp_skb_cb) + len >
+           FIELD_SIZEOF(struct sk_buff, cb))
+               return;
+
+       spin_lock_bh(&app->lock);
+       attr = mrp_attr_lookup(app, value, len, type);
+       if (!attr) {
+               spin_unlock_bh(&app->lock);
+               return;
+       }
+       mrp_attr_event(app, attr, MRP_EVENT_LV);
+       spin_unlock_bh(&app->lock);
+}
+EXPORT_SYMBOL_GPL(mrp_request_leave);
+
+static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
+{
+       struct rb_node *node, *next;
+       struct mrp_attr *attr;
+
+       for (node = rb_first(&app->mad);
+            next = node ? rb_next(node) : NULL, node != NULL;
+            node = next) {
+               attr = rb_entry(node, struct mrp_attr, node);
+               mrp_attr_event(app, attr, event);
+       }
+}
+
+static void mrp_join_timer_arm(struct mrp_applicant *app)
+{
+       unsigned long delay;
+
+       delay = (u64)msecs_to_jiffies(mrp_join_time) * net_random() >> 32;
+       mod_timer(&app->join_timer, jiffies + delay);
+}
+
+static void mrp_join_timer(unsigned long data)
+{
+       struct mrp_applicant *app = (struct mrp_applicant *)data;
+
+       spin_lock(&app->lock);
+       mrp_mad_event(app, MRP_EVENT_TX);
+       mrp_pdu_queue(app);
+       spin_unlock(&app->lock);
+
+       mrp_queue_xmit(app);
+       mrp_join_timer_arm(app);
+}
+
+static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+{
+       __be16 endmark;
+
+       if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
+               return -1;
+       if (endmark == MRP_END_MARK) {
+               *offset += sizeof(endmark);
+               return -1;
+       }
+       return 0;
+}
+
+static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
+                                       struct sk_buff *skb,
+                                       enum mrp_vecattr_event vaevent)
+{
+       struct mrp_attr *attr;
+       enum mrp_event event;
+
+       attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
+                              mrp_cb(skb)->mh->attrlen,
+                              mrp_cb(skb)->mh->attrtype);
+       if (attr == NULL)
+               return;
+
+       switch (vaevent) {
+       case MRP_VECATTR_EVENT_NEW:
+               event = MRP_EVENT_R_NEW;
+               break;
+       case MRP_VECATTR_EVENT_JOIN_IN:
+               event = MRP_EVENT_R_JOIN_IN;
+               break;
+       case MRP_VECATTR_EVENT_IN:
+               event = MRP_EVENT_R_IN;
+               break;
+       case MRP_VECATTR_EVENT_JOIN_MT:
+               event = MRP_EVENT_R_JOIN_MT;
+               break;
+       case MRP_VECATTR_EVENT_MT:
+               event = MRP_EVENT_R_MT;
+               break;
+       case MRP_VECATTR_EVENT_LV:
+               event = MRP_EVENT_R_LV;
+               break;
+       default:
+               return;
+       }
+
+       mrp_attr_event(app, attr, event);
+}
+
+static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
+                                struct sk_buff *skb, int *offset)
+{
+       struct mrp_vecattr_hdr _vah;
+       u16 valen;
+       u8 vaevents, vaevent;
+
+       mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
+                                             &_vah);
+       if (!mrp_cb(skb)->vah)
+               return -1;
+       *offset += sizeof(_vah);
+
+       if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+           MRP_VECATTR_HDR_FLAG_LA)
+               mrp_mad_event(app, MRP_EVENT_R_LA);
+       valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+                           MRP_VECATTR_HDR_LEN_MASK);
+
+       /* The VectorAttribute structure in a PDU carries event information
+        * about one or more attributes having consecutive values. Only the
+        * value for the first attribute is contained in the structure. So
+        * we make a copy of that value, and then increment it each time we
+        * advance to the next event in its Vector.
+        */
+       if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
+           FIELD_SIZEOF(struct sk_buff, cb))
+               return -1;
+       if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
+                         mrp_cb(skb)->mh->attrlen) < 0)
+               return -1;
+       *offset += mrp_cb(skb)->mh->attrlen;
+
+       /* In a VectorAttribute, the Vector contains events which are packed
+        * three to a byte. We process one byte of the Vector at a time.
+        */
+       while (valen > 0) {
+               if (skb_copy_bits(skb, *offset, &vaevents,
+                                 sizeof(vaevents)) < 0)
+                       return -1;
+               *offset += sizeof(vaevents);
+
+               /* Extract and process the first event. */
+               vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
+                                     __MRP_VECATTR_EVENT_MAX);
+               if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
+                       /* The byte is malformed; stop processing. */
+                       return -1;
+               }
+               mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+               /* If present, extract and process the second event. */
+               if (!--valen)
+                       break;
+               mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+                                 mrp_cb(skb)->mh->attrlen);
+               vaevents %= (__MRP_VECATTR_EVENT_MAX *
+                            __MRP_VECATTR_EVENT_MAX);
+               vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
+               mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+               /* If present, extract and process the third event. */
+               if (!--valen)
+                       break;
+               mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+                                 mrp_cb(skb)->mh->attrlen);
+               vaevents %= __MRP_VECATTR_EVENT_MAX;
+               vaevent = vaevents;
+               mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+       }
+       return 0;
+}
+
+static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
+                            int *offset)
+{
+       struct mrp_msg_hdr _mh;
+
+       mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
+       if (!mrp_cb(skb)->mh)
+               return -1;
+       *offset += sizeof(_mh);
+
+       if (mrp_cb(skb)->mh->attrtype == 0 ||
+           mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
+           mrp_cb(skb)->mh->attrlen == 0)
+               return -1;
+
+       while (skb->len > *offset) {
+               if (mrp_pdu_parse_end_mark(skb, offset) < 0)
+                       break;
+               if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
+                       return -1;
+       }
+       return 0;
+}
+
+static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
+                  struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct mrp_application *appl = container_of(pt, struct mrp_application,
+                                                   pkttype);
+       struct mrp_port *port;
+       struct mrp_applicant *app;
+       struct mrp_pdu_hdr _ph;
+       const struct mrp_pdu_hdr *ph;
+       int offset = skb_network_offset(skb);
+
+       /* If the interface is in promiscuous mode, drop the packet if
+        * it was unicast to another host.
+        */
+       if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
+               goto out;
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               goto out;
+       port = rcu_dereference(dev->mrp_port);
+       if (unlikely(!port))
+               goto out;
+       app = rcu_dereference(port->applicants[appl->type]);
+       if (unlikely(!app))
+               goto out;
+
+       ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
+       if (!ph)
+               goto out;
+       offset += sizeof(_ph);
+
+       if (ph->version != app->app->version)
+               goto out;
+
+       spin_lock(&app->lock);
+       while (skb->len > offset) {
+               if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
+                       break;
+               if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
+                       break;
+       }
+       spin_unlock(&app->lock);
+out:
+       kfree_skb(skb);
+       return 0;
+}
+
+static int mrp_init_port(struct net_device *dev)
+{
+       struct mrp_port *port;
+
+       port = kzalloc(sizeof(*port), GFP_KERNEL);
+       if (!port)
+               return -ENOMEM;
+       rcu_assign_pointer(dev->mrp_port, port);
+       return 0;
+}
+
+static void mrp_release_port(struct net_device *dev)
+{
+       struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+       unsigned int i;
+
+       for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
+               if (rtnl_dereference(port->applicants[i]))
+                       return;
+       }
+       RCU_INIT_POINTER(dev->mrp_port, NULL);
+       kfree_rcu(port, rcu);
+}
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+       struct mrp_applicant *app;
+       int err;
+
+       ASSERT_RTNL();
+
+       if (!rtnl_dereference(dev->mrp_port)) {
+               err = mrp_init_port(dev);
+               if (err < 0)
+                       goto err1;
+       }
+
+       err = -ENOMEM;
+       app = kzalloc(sizeof(*app), GFP_KERNEL);
+       if (!app)
+               goto err2;
+
+       err = dev_mc_add(dev, appl->group_address);
+       if (err < 0)
+               goto err3;
+
+       app->dev = dev;
+       app->app = appl;
+       app->mad = RB_ROOT;
+       spin_lock_init(&app->lock);
+       skb_queue_head_init(&app->queue);
+       rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+       setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+       mrp_join_timer_arm(app);
+       return 0;
+
+err3:
+       kfree(app);
+err2:
+       mrp_release_port(dev);
+err1:
+       return err;
+}
+EXPORT_SYMBOL_GPL(mrp_init_applicant);
+
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+       struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+       struct mrp_applicant *app = rtnl_dereference(
+               port->applicants[appl->type]);
+
+       ASSERT_RTNL();
+
+       RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
+       /* Delete timer and generate a final TX event to flush out
+        * all pending messages before the applicant is gone.
+        */
+       del_timer_sync(&app->join_timer);
+       mrp_mad_event(app, MRP_EVENT_TX);
+       mrp_pdu_queue(app);
+       mrp_queue_xmit(app);
+
+       dev_mc_del(dev, appl->group_address);
+       kfree_rcu(app, rcu);
+       mrp_release_port(dev);
+}
+EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
+
+int mrp_register_application(struct mrp_application *appl)
+{
+       appl->pkttype.func = mrp_rcv;
+       dev_add_pack(&appl->pkttype);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_register_application);
+
+void mrp_unregister_application(struct mrp_application *appl)
+{
+       dev_remove_pack(&appl->pkttype);
+}
+EXPORT_SYMBOL_GPL(mrp_unregister_application);
index fa073a5..8f7517d 100644 (file)
@@ -27,3 +27,14 @@ config VLAN_8021Q_GVRP
          automatic propagation of registered VLANs to switches.
 
          If unsure, say N.
+
+config VLAN_8021Q_MVRP
+       bool "MVRP (Multiple VLAN Registration Protocol) support"
+       depends on VLAN_8021Q
+       select MRP
+       help
+         Select this to enable MVRP end-system support. MVRP is used for
+         automatic propagation of registered VLANs to switches; it
+         supersedes GVRP and is not backwards-compatible.
+
+         If unsure, say N.
index 9f4f174..7bc8db0 100644 (file)
@@ -6,5 +6,6 @@ obj-$(CONFIG_VLAN_8021Q)                += 8021q.o
 
 8021q-y                                        := vlan.o vlan_dev.o vlan_netlink.o
 8021q-$(CONFIG_VLAN_8021Q_GVRP)                += vlan_gvrp.o
+8021q-$(CONFIG_VLAN_8021Q_MVRP)                += vlan_mvrp.o
 8021q-$(CONFIG_PROC_FS)                        += vlanproc.o
 
index addc578..a187144 100644 (file)
@@ -95,6 +95,8 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        grp->nr_vlan_devs--;
 
+       if (vlan->flags & VLAN_FLAG_MVRP)
+               vlan_mvrp_request_leave(dev);
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);
 
@@ -107,8 +109,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        netdev_upper_dev_unlink(real_dev, dev);
 
-       if (grp->nr_vlan_devs == 0)
+       if (grp->nr_vlan_devs == 0) {
+               vlan_mvrp_uninit_applicant(real_dev);
                vlan_gvrp_uninit_applicant(real_dev);
+       }
 
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
@@ -151,15 +155,18 @@ int register_vlan_dev(struct net_device *dev)
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
                        goto out_vid_del;
+               err = vlan_mvrp_init_applicant(real_dev);
+               if (err < 0)
+                       goto out_uninit_gvrp;
        }
 
        err = vlan_group_prealloc_vid(grp, vlan_id);
        if (err < 0)
-               goto out_uninit_applicant;
+               goto out_uninit_mvrp;
 
        err = netdev_upper_dev_link(real_dev, dev);
        if (err)
-               goto out_uninit_applicant;
+               goto out_uninit_mvrp;
 
        err = register_netdevice(dev);
        if (err < 0)
@@ -181,7 +188,10 @@ int register_vlan_dev(struct net_device *dev)
 
 out_upper_dev_unlink:
        netdev_upper_dev_unlink(real_dev, dev);
-out_uninit_applicant:
+out_uninit_mvrp:
+       if (grp->nr_vlan_devs == 0)
+               vlan_mvrp_uninit_applicant(real_dev);
+out_uninit_gvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
 out_vid_del:
@@ -655,13 +665,19 @@ static int __init vlan_proto_init(void)
        if (err < 0)
                goto err3;
 
-       err = vlan_netlink_init();
+       err = vlan_mvrp_init();
        if (err < 0)
                goto err4;
 
+       err = vlan_netlink_init();
+       if (err < 0)
+               goto err5;
+
        vlan_ioctl_set(vlan_ioctl_handler);
        return 0;
 
+err5:
+       vlan_mvrp_uninit();
 err4:
        vlan_gvrp_uninit();
 err3:
@@ -682,6 +698,7 @@ static void __exit vlan_cleanup_module(void)
        unregister_pernet_subsys(&vlan_net_ops);
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
+       vlan_mvrp_uninit();
        vlan_gvrp_uninit();
 }
 
index a4886d9..670f1e8 100644 (file)
@@ -171,6 +171,22 @@ static inline int vlan_gvrp_init(void) { return 0; }
 static inline void vlan_gvrp_uninit(void) {}
 #endif
 
+#ifdef CONFIG_VLAN_8021Q_MVRP
+extern int vlan_mvrp_request_join(const struct net_device *dev);
+extern void vlan_mvrp_request_leave(const struct net_device *dev);
+extern int vlan_mvrp_init_applicant(struct net_device *dev);
+extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
+extern int vlan_mvrp_init(void);
+extern void vlan_mvrp_uninit(void);
+#else
+static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_mvrp_init(void) { return 0; }
+static inline void vlan_mvrp_uninit(void) {}
+#endif
+
 extern const char vlan_fullname[];
 extern const char vlan_version[];
 extern int vlan_netlink_init(void);
index 71b64fd..f3b6f51 100644 (file)
@@ -144,6 +144,7 @@ err_free:
        kfree_skb(skb);
        return NULL;
 }
+EXPORT_SYMBOL(vlan_untag);
 
 
 /*
index 09f9108..19cf81b 100644 (file)
@@ -261,7 +261,7 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
        u32 old_flags = vlan->flags;
 
        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-                    VLAN_FLAG_LOOSE_BINDING))
+                    VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
                return -EINVAL;
 
        vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -272,6 +272,13 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
                else
                        vlan_gvrp_request_leave(dev);
        }
+
+       if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+               if (vlan->flags & VLAN_FLAG_MVRP)
+                       vlan_mvrp_request_join(dev);
+               else
+                       vlan_mvrp_request_leave(dev);
+       }
        return 0;
 }
 
@@ -312,6 +319,9 @@ static int vlan_dev_open(struct net_device *dev)
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_join(dev);
 
+       if (vlan->flags & VLAN_FLAG_MVRP)
+               vlan_mvrp_request_join(dev);
+
        if (netif_carrier_ok(real_dev))
                netif_carrier_on(dev);
        return 0;
@@ -723,7 +733,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 
        vlan->netpoll = NULL;
 
-       __netpoll_free_rcu(netpoll);
+       __netpoll_free_async(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c
new file mode 100644 (file)
index 0000000..d9ec1d5
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *     IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
+ *
+ *     Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ *     Adapted from code in net/8021q/vlan_gvrp.c
+ *     Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/mrp.h>
+#include "vlan.h"
+
+#define MRP_MVRP_ADDRESS       { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum mvrp_attributes {
+       MVRP_ATTR_INVALID,
+       MVRP_ATTR_VID,
+       __MVRP_ATTR_MAX
+};
+#define MVRP_ATTR_MAX  (__MVRP_ATTR_MAX - 1)
+
+static struct mrp_application vlan_mrp_app __read_mostly = {
+       .type           = MRP_APPLICATION_MVRP,
+       .maxattr        = MVRP_ATTR_MAX,
+       .pkttype.type   = htons(ETH_P_MVRP),
+       .group_address  = MRP_MVRP_ADDRESS,
+       .version        = 0,
+};
+
+int vlan_mvrp_request_join(const struct net_device *dev)
+{
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+       __be16 vlan_id = htons(vlan->vlan_id);
+
+       return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
+                               &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+void vlan_mvrp_request_leave(const struct net_device *dev)
+{
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+       __be16 vlan_id = htons(vlan->vlan_id);
+
+       mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
+                         &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+int vlan_mvrp_init_applicant(struct net_device *dev)
+{
+       return mrp_init_applicant(dev, &vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit_applicant(struct net_device *dev)
+{
+       mrp_uninit_applicant(dev, &vlan_mrp_app);
+}
+
+int __init vlan_mvrp_init(void)
+{
+       return mrp_register_application(&vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit(void)
+{
+       mrp_unregister_application(&vlan_mrp_app);
+}
index 708c80e..1789658 100644 (file)
@@ -62,7 +62,7 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
                flags = nla_data(data[IFLA_VLAN_FLAGS]);
                if ((flags->flags & flags->mask) &
                    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-                     VLAN_FLAG_LOOSE_BINDING))
+                     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
                        return -EINVAL;
        }
 
index 4de77ea..dc526ec 100644 (file)
@@ -131,7 +131,7 @@ void vlan_proc_cleanup(struct net *net)
                remove_proc_entry(name_conf, vn->proc_vlan_dir);
 
        if (vn->proc_vlan_dir)
-               proc_net_remove(net, name_root);
+               remove_proc_entry(name_root, net->proc_net);
 
        /* Dynamically added entries should be cleaned up as their vlan_device
         * is removed, so we should not have to take care of it here...
index c31348e..5a1888b 100644 (file)
@@ -217,6 +217,7 @@ source "net/dcb/Kconfig"
 source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
 source "net/openvswitch/Kconfig"
+source "net/vmw_vsock/Kconfig"
 
 config RPS
        boolean
index c5aa8b3..091e7b0 100644 (file)
@@ -69,3 +69,4 @@ obj-$(CONFIG_CEPH_LIB)                += ceph/
 obj-$(CONFIG_BATMAN_ADV)       += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
 obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
+obj-$(CONFIG_VSOCKETS) += vmw_vsock/
index 0d020de..b4e7534 100644 (file)
@@ -460,7 +460,7 @@ static void atm_proc_dirs_remove(void)
                if (e->dirent)
                        remove_proc_entry(e->name, atm_proc_root);
        }
-       proc_net_remove(&init_net, "atm");
+       remove_proc_entry("atm", init_net.proc_net);
 }
 
 int __init atm_proc_init(void)
index 779095d..69a06c4 100644 (file)
@@ -1992,9 +1992,10 @@ static int __init ax25_init(void)
        dev_add_pack(&ax25_packet_type);
        register_netdevice_notifier(&ax25_dev_notifier);
 
-       proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
-       proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
-       proc_net_fops_create(&init_net, "ax25_calls", S_IRUGO, &ax25_uid_fops);
+       proc_create("ax25_route", S_IRUGO, init_net.proc_net,
+                   &ax25_route_fops);
+       proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
+       proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
 out:
        return rc;
 }
@@ -2008,9 +2009,9 @@ MODULE_ALIAS_NETPROTO(PF_AX25);
 
 static void __exit ax25_exit(void)
 {
-       proc_net_remove(&init_net, "ax25_route");
-       proc_net_remove(&init_net, "ax25");
-       proc_net_remove(&init_net, "ax25_calls");
+       remove_proc_entry("ax25_route", init_net.proc_net);
+       remove_proc_entry("ax25", init_net.proc_net);
+       remove_proc_entry("ax25_calls", init_net.proc_net);
 
        unregister_netdevice_notifier(&ax25_dev_notifier);
 
index ea0bd31..761a590 100644 (file)
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
        /* this is an hash collision with the temporary selected node. Choose
         * the one with the lowest address
         */
-       if ((tmp_max == max) &&
+       if ((tmp_max == max) && max_orig_node &&
            (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
                goto out;
 
index 2f67d5e..eb0f4b1 100644 (file)
@@ -290,7 +290,7 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
                goto done;
        }
 
-       mgr->state = READ_LOC_AMP_INFO;
+       set_bit(READ_LOC_AMP_INFO, &mgr->state);
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
 
 done:
@@ -499,8 +499,16 @@ send_rsp:
        if (hdev)
                hci_dev_put(hdev);
 
-       a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
-                 &rsp);
+       /* Reply error now and success after HCI Write Remote AMP Assoc
+          command complete with success status
+        */
+       if (rsp.status != A2MP_STATUS_SUCCESS) {
+               a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
+                         sizeof(rsp), &rsp);
+       } else {
+               set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
+               mgr->ident = hdr->ident;
+       }
 
        skb_pull(skb, le16_to_cpu(hdr->len));
        return 0;
@@ -840,7 +848,7 @@ struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
 
        mutex_lock(&amp_mgr_list_lock);
        list_for_each_entry(mgr, &amp_mgr_list, list) {
-               if (mgr->state == state) {
+               if (test_and_clear_bit(state, &mgr->state)) {
                        amp_mgr_get(mgr);
                        mutex_unlock(&amp_mgr_list_lock);
                        return mgr;
@@ -949,6 +957,32 @@ clean:
        kfree(req);
 }
 
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
+{
+       struct amp_mgr *mgr;
+       struct a2mp_physlink_rsp rsp;
+       struct hci_conn *hs_hcon;
+
+       mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
+       if (!mgr)
+               return;
+
+       hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
+       if (!hs_hcon) {
+               rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+       } else {
+               rsp.remote_id = hs_hcon->remote_id;
+               rsp.status = A2MP_STATUS_SUCCESS;
+       }
+
+       BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
+              status);
+
+       rsp.local_id = hdev->id;
+       a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
+       amp_mgr_put(mgr);
+}
+
 void a2mp_discover_amp(struct l2cap_chan *chan)
 {
        struct l2cap_conn *conn = chan->conn;
index 5355df6..d3ee69b 100644 (file)
@@ -641,7 +641,7 @@ int bt_procfs_init(struct module* module, struct net *net, const char *name,
        sk_list->fops.llseek    = seq_lseek;
        sk_list->fops.release   = seq_release_private;
 
-       pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
+       pde = proc_create(name, 0, net->proc_net, &sk_list->fops);
        if (!pde)
                return -ENOMEM;
 
@@ -652,7 +652,7 @@ int bt_procfs_init(struct module* module, struct net *net, const char *name,
 
 void bt_procfs_cleanup(struct net *net, const char *name)
 {
-       proc_net_remove(net, name);
+       remove_proc_entry(name, net->proc_net);
 }
 #else
 int bt_procfs_init(struct module* module, struct net *net, const char *name,
index 1b0d92c..d459ed4 100644 (file)
@@ -236,7 +236,7 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
 
        cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
-       mgr->state = READ_LOC_AMP_ASSOC;
+       set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
 }
 
@@ -250,7 +250,7 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
        cp.len_so_far = cpu_to_le16(0);
        cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
-       mgr->state = READ_LOC_AMP_ASSOC_FINAL;
+       set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
 
        /* Read Local AMP Assoc final link information data */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
@@ -317,7 +317,9 @@ void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
        if (!hcon)
                return;
 
-       amp_write_rem_assoc_frag(hdev, hcon);
+       /* Send A2MP create phylink rsp when all fragments are written */
+       if (amp_write_rem_assoc_frag(hdev, hcon))
+               a2mp_send_create_phy_link_rsp(hdev, 0);
 }
 
 void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
@@ -403,26 +405,20 @@ void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
 
 void amp_create_logical_link(struct l2cap_chan *chan)
 {
+       struct hci_conn *hs_hcon = chan->hs_hcon;
        struct hci_cp_create_accept_logical_link cp;
-       struct hci_conn *hcon;
        struct hci_dev *hdev;
 
-       BT_DBG("chan %p", chan);
+       BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
 
-       if (!chan->hs_hcon)
+       if (!hs_hcon)
                return;
 
        hdev = hci_dev_hold(chan->hs_hcon->hdev);
        if (!hdev)
                return;
 
-       BT_DBG("chan %p dst %pMR", chan, chan->conn->dst);
-
-       hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, chan->conn->dst);
-       if (!hcon)
-               goto done;
-
-       cp.phy_handle = hcon->handle;
+       cp.phy_handle = hs_hcon->handle;
 
        cp.tx_flow_spec.id = chan->local_id;
        cp.tx_flow_spec.stype = chan->local_stype;
@@ -438,14 +434,13 @@ void amp_create_logical_link(struct l2cap_chan *chan)
        cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
        cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
 
-       if (hcon->out)
+       if (hs_hcon->out)
                hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
                             &cp);
        else
                hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
                             &cp);
 
-done:
        hci_dev_put(hdev);
 }
 
index a5b6397..e430b1a 100644 (file)
@@ -33,7 +33,6 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
 
 #include "bnep.h"
 
index 25bfce0..4925a02 100644 (file)
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
        __u8 reason = hci_proto_disconn_ind(conn);
 
        switch (conn->type) {
-       case ACL_LINK:
-               hci_acl_disconn(conn, reason);
-               break;
        case AMP_LINK:
                hci_amp_disconn(conn, reason);
                break;
+       default:
+               hci_acl_disconn(conn, reason);
+               break;
        }
 }
 
index 0f78e34..22e77a7 100644 (file)
@@ -1146,7 +1146,8 @@ static void hci_power_on(struct work_struct *work)
                return;
 
        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-               schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
+               queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+                                  HCI_AUTO_OFF_TIMEOUT);
 
        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
@@ -1182,14 +1183,10 @@ static void hci_discov_off(struct work_struct *work)
 
 int hci_uuids_clear(struct hci_dev *hdev)
 {
-       struct list_head *p, *n;
-
-       list_for_each_safe(p, n, &hdev->uuids) {
-               struct bt_uuid *uuid;
+       struct bt_uuid *uuid, *tmp;
 
-               uuid = list_entry(p, struct bt_uuid, list);
-
-               list_del(p);
+       list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
+               list_del(&uuid->list);
                kfree(uuid);
        }
 
@@ -1621,8 +1618,8 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
        if (err < 0)
                return err;
 
-       schedule_delayed_work(&hdev->le_scan_disable,
-                             msecs_to_jiffies(timeout));
+       queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
+                          msecs_to_jiffies(timeout));
 
        return 0;
 }
@@ -1799,6 +1796,15 @@ int hci_register_dev(struct hci_dev *hdev)
                goto err;
        }
 
+       hdev->req_workqueue = alloc_workqueue(hdev->name,
+                                             WQ_HIGHPRI | WQ_UNBOUND |
+                                             WQ_MEM_RECLAIM, 1);
+       if (!hdev->req_workqueue) {
+               destroy_workqueue(hdev->workqueue);
+               error = -ENOMEM;
+               goto err;
+       }
+
        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;
@@ -1821,12 +1827,13 @@ int hci_register_dev(struct hci_dev *hdev)
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
 
-       schedule_work(&hdev->power_on);
+       queue_work(hdev->req_workqueue, &hdev->power_on);
 
        return id;
 
 err_wqueue:
        destroy_workqueue(hdev->workqueue);
+       destroy_workqueue(hdev->req_workqueue);
 err:
        ida_simple_remove(&hci_index_ida, hdev->id);
        write_lock(&hci_dev_list_lock);
@@ -1880,6 +1887,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
        hci_del_sysfs(hdev);
 
        destroy_workqueue(hdev->workqueue);
+       destroy_workqueue(hdev->req_workqueue);
 
        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
index 81b4448..477726a 100644 (file)
@@ -609,8 +609,17 @@ static void le_setup(struct hci_dev *hdev)
        /* Read LE Buffer Size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
 
+       /* Read LE Local Supported Features */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
+
        /* Read LE Advertising Channel TX Power */
        hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
+
+       /* Read LE White List Size */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
+
+       /* Read LE Supported States */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
 }
 
 static void hci_setup(struct hci_dev *hdev)
@@ -1090,6 +1099,19 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
        hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
 }
 
+static void hci_cc_le_read_local_features(struct hci_dev *hdev,
+                                         struct sk_buff *skb)
+{
+       struct hci_rp_le_read_local_features *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (!rp->status)
+               memcpy(hdev->le_features, rp->features, 8);
+
+       hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
+}
+
 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
                                        struct sk_buff *skb)
 {
@@ -1290,6 +1312,19 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
        }
 }
 
+static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
+                                          struct sk_buff *skb)
+{
+       struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
+
+       if (!rp->status)
+               hdev->le_white_list_size = rp->size;
+
+       hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
+}
+
 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
@@ -1314,6 +1349,19 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
        hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
 }
 
+static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
+                                           struct sk_buff *skb)
+{
+       struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (!rp->status)
+               memcpy(hdev->le_states, rp->le_states, 8);
+
+       hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
+}
+
 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
                                           struct sk_buff *skb)
 {
@@ -2628,6 +2676,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_le_read_buffer_size(hdev, skb);
                break;
 
+       case HCI_OP_LE_READ_LOCAL_FEATURES:
+               hci_cc_le_read_local_features(hdev, skb);
+               break;
+
        case HCI_OP_LE_READ_ADV_TX_POWER:
                hci_cc_le_read_adv_tx_power(hdev, skb);
                break;
@@ -2664,6 +2716,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_le_set_scan_enable(hdev, skb);
                break;
 
+       case HCI_OP_LE_READ_WHITE_LIST_SIZE:
+               hci_cc_le_read_white_list_size(hdev, skb);
+               break;
+
        case HCI_OP_LE_LTK_REPLY:
                hci_cc_le_ltk_reply(hdev, skb);
                break;
@@ -2672,6 +2728,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_le_ltk_neg_reply(hdev, skb);
                break;
 
+       case HCI_OP_LE_READ_SUPPORTED_STATES:
+               hci_cc_le_read_supported_states(hdev, skb);
+               break;
+
        case HCI_OP_WRITE_LE_HOST_SUPPORTED:
                hci_cc_write_le_host_supported(hdev, skb);
                break;
@@ -3928,8 +3988,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
        void *ptr = &skb->data[1];
        s8 rssi;
 
-       hci_dev_lock(hdev);
-
        while (num_reports--) {
                struct hci_ev_le_advertising_info *ev = ptr;
 
@@ -3939,8 +3997,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                ptr += sizeof(*ev) + ev->length + 1;
        }
-
-       hci_dev_unlock(hdev);
 }
 
 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
index 55cceee..23b4e24 100644 (file)
@@ -2,6 +2,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -461,19 +462,18 @@ static const struct file_operations blacklist_fops = {
 
 static void print_bt_uuid(struct seq_file *f, u8 *uuid)
 {
-       __be32 data0, data4;
-       __be16 data1, data2, data3, data5;
+       u32 data0, data5;
+       u16 data1, data2, data3, data4;
 
-       memcpy(&data0, &uuid[0], 4);
-       memcpy(&data1, &uuid[4], 2);
-       memcpy(&data2, &uuid[6], 2);
-       memcpy(&data3, &uuid[8], 2);
-       memcpy(&data4, &uuid[10], 4);
-       memcpy(&data5, &uuid[14], 2);
+       data5 = get_unaligned_le32(uuid);
+       data4 = get_unaligned_le16(uuid + 4);
+       data3 = get_unaligned_le16(uuid + 6);
+       data2 = get_unaligned_le16(uuid + 8);
+       data1 = get_unaligned_le16(uuid + 10);
+       data0 = get_unaligned_le32(uuid + 12);
 
-       seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
-                  ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
-                  ntohl(data4), ntohs(data5));
+       seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
+                  data0, data1, data2, data3, data4, data5);
 }
 
 static int uuids_show(struct seq_file *f, void *p)
index 22e6583..7c7e932 100644 (file)
@@ -1527,17 +1527,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
        BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
 
        switch (hcon->type) {
-       case AMP_LINK:
-               conn->mtu = hcon->hdev->block_mtu;
-               break;
-
        case LE_LINK:
                if (hcon->hdev->le_mtu) {
                        conn->mtu = hcon->hdev->le_mtu;
                        break;
                }
                /* fall through */
-
        default:
                conn->mtu = hcon->hdev->acl_mtu;
                break;
index f559b96..39395c7 100644 (file)
@@ -35,7 +35,7 @@
 bool enable_hs;
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  2
+#define MGMT_REVISION  3
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -435,35 +435,117 @@ static u32 get_current_settings(struct hci_dev *hdev)
 
 #define PNP_INFO_SVCLASS_ID            0x1200
 
-static u8 bluetooth_base_uuid[] = {
-                       0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
-                       0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
+static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+       u8 *ptr = data, *uuids_start = NULL;
+       struct bt_uuid *uuid;
+
+       if (len < 4)
+               return ptr;
+
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               u16 uuid16;
+
+               if (uuid->size != 16)
+                       continue;
+
+               uuid16 = get_unaligned_le16(&uuid->uuid[12]);
+               if (uuid16 < 0x1100)
+                       continue;
+
+               if (uuid16 == PNP_INFO_SVCLASS_ID)
+                       continue;
 
-static u16 get_uuid16(u8 *uuid128)
+               if (!uuids_start) {
+                       uuids_start = ptr;
+                       uuids_start[0] = 1;
+                       uuids_start[1] = EIR_UUID16_ALL;
+                       ptr += 2;
+               }
+
+               /* Stop if not enough space to put next UUID */
+               if ((ptr - data) + sizeof(u16) > len) {
+                       uuids_start[1] = EIR_UUID16_SOME;
+                       break;
+               }
+
+               *ptr++ = (uuid16 & 0x00ff);
+               *ptr++ = (uuid16 & 0xff00) >> 8;
+               uuids_start[0] += sizeof(uuid16);
+       }
+
+       return ptr;
+}
+
+static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 {
-       u32 val;
-       int i;
+       u8 *ptr = data, *uuids_start = NULL;
+       struct bt_uuid *uuid;
+
+       if (len < 6)
+               return ptr;
 
-       for (i = 0; i < 12; i++) {
-               if (bluetooth_base_uuid[i] != uuid128[i])
-                       return 0;
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               if (uuid->size != 32)
+                       continue;
+
+               if (!uuids_start) {
+                       uuids_start = ptr;
+                       uuids_start[0] = 1;
+                       uuids_start[1] = EIR_UUID32_ALL;
+                       ptr += 2;
+               }
+
+               /* Stop if not enough space to put next UUID */
+               if ((ptr - data) + sizeof(u32) > len) {
+                       uuids_start[1] = EIR_UUID32_SOME;
+                       break;
+               }
+
+               memcpy(ptr, &uuid->uuid[12], sizeof(u32));
+               ptr += sizeof(u32);
+               uuids_start[0] += sizeof(u32);
        }
 
-       val = get_unaligned_le32(&uuid128[12]);
-       if (val > 0xffff)
-               return 0;
+       return ptr;
+}
+
+static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+       u8 *ptr = data, *uuids_start = NULL;
+       struct bt_uuid *uuid;
+
+       if (len < 18)
+               return ptr;
 
-       return (u16) val;
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               if (uuid->size != 128)
+                       continue;
+
+               if (!uuids_start) {
+                       uuids_start = ptr;
+                       uuids_start[0] = 1;
+                       uuids_start[1] = EIR_UUID128_ALL;
+                       ptr += 2;
+               }
+
+               /* Stop if not enough space to put next UUID */
+               if ((ptr - data) + 16 > len) {
+                       uuids_start[1] = EIR_UUID128_SOME;
+                       break;
+               }
+
+               memcpy(ptr, uuid->uuid, 16);
+               ptr += 16;
+               uuids_start[0] += 16;
+       }
+
+       return ptr;
 }
 
 static void create_eir(struct hci_dev *hdev, u8 *data)
 {
        u8 *ptr = data;
-       u16 eir_len = 0;
-       u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
-       int i, truncated = 0;
-       struct bt_uuid *uuid;
        size_t name_len;
 
        name_len = strlen(hdev->dev_name);
@@ -481,7 +563,6 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
 
                memcpy(ptr + 2, hdev->dev_name, name_len);
 
-               eir_len += (name_len + 2);
                ptr += (name_len + 2);
        }
 
@@ -490,7 +571,6 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;
 
-               eir_len += 3;
                ptr += 3;
        }
 
@@ -503,60 +583,12 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);
 
-               eir_len += 10;
                ptr += 10;
        }
 
-       memset(uuid16_list, 0, sizeof(uuid16_list));
-
-       /* Group all UUID16 types */
-       list_for_each_entry(uuid, &hdev->uuids, list) {
-               u16 uuid16;
-
-               uuid16 = get_uuid16(uuid->uuid);
-               if (uuid16 == 0)
-                       return;
-
-               if (uuid16 < 0x1100)
-                       continue;
-
-               if (uuid16 == PNP_INFO_SVCLASS_ID)
-                       continue;
-
-               /* Stop if not enough space to put next UUID */
-               if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
-                       truncated = 1;
-                       break;
-               }
-
-               /* Check for duplicates */
-               for (i = 0; uuid16_list[i] != 0; i++)
-                       if (uuid16_list[i] == uuid16)
-                               break;
-
-               if (uuid16_list[i] == 0) {
-                       uuid16_list[i] = uuid16;
-                       eir_len += sizeof(u16);
-               }
-       }
-
-       if (uuid16_list[0] != 0) {
-               u8 *length = ptr;
-
-               /* EIR Data type */
-               ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
-
-               ptr += 2;
-               eir_len += 2;
-
-               for (i = 0; uuid16_list[i] != 0; i++) {
-                       *ptr++ = (uuid16_list[i] & 0x00ff);
-                       *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
-               }
-
-               /* EIR Data length */
-               *length = (i * sizeof(u16)) + 1;
-       }
+       ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+       ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+       ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
 }
 
 static int update_eir(struct hci_dev *hdev)
@@ -728,13 +760,9 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
                                            void *data),
                                 void *data)
 {
-       struct list_head *p, *n;
-
-       list_for_each_safe(p, n, &hdev->mgmt_pending) {
-               struct pending_cmd *cmd;
-
-               cmd = list_entry(p, struct pending_cmd, list);
+       struct pending_cmd *cmd, *tmp;
 
+       list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
                if (opcode > 0 && cmd->opcode != opcode)
                        continue;
 
@@ -777,14 +805,19 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("request for %s", hdev->name);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        hci_dev_lock(hdev);
 
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                cancel_delayed_work(&hdev->power_off);
 
                if (cp->val) {
-                       err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
-                       mgmt_powered(hdev, 1);
+                       mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
+                                        data, len);
+                       err = mgmt_powered(hdev, 1);
                        goto failed;
                }
        }
@@ -807,9 +840,9 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        if (cp->val)
-               schedule_work(&hdev->power_on);
+               queue_work(hdev->req_workqueue, &hdev->power_on);
        else
-               schedule_work(&hdev->power_off.work);
+               queue_work(hdev->req_workqueue, &hdev->power_off.work);
 
        err = 0;
 
@@ -872,6 +905,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                 MGMT_STATUS_NOT_SUPPORTED);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        timeout = __le16_to_cpu(cp->timeout);
        if (!cp->val && timeout > 0)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
@@ -971,6 +1008,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
@@ -1041,6 +1082,10 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("request for %s", hdev->name);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        hci_dev_lock(hdev);
 
        if (cp->val)
@@ -1073,6 +1118,10 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
                return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
@@ -1133,13 +1182,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        BT_DBG("request for %s", hdev->name);
 
-       hci_dev_lock(hdev);
+       if (!lmp_ssp_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+                                 MGMT_STATUS_NOT_SUPPORTED);
 
-       if (!lmp_ssp_capable(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
-                                MGMT_STATUS_NOT_SUPPORTED);
-               goto failed;
-       }
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
 
        val = !!cp->val;
 
@@ -1199,6 +1250,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        if (cp->val)
                set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
        else
@@ -1217,13 +1272,15 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        BT_DBG("request for %s", hdev->name);
 
-       hci_dev_lock(hdev);
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+                                 MGMT_STATUS_NOT_SUPPORTED);
 
-       if (!lmp_le_capable(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
-                                MGMT_STATUS_NOT_SUPPORTED);
-               goto unlock;
-       }
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
 
        val = !!cp->val;
        enabled = lmp_host_le_capable(hdev);
@@ -1275,6 +1332,25 @@ unlock:
        return err;
 }
 
+static const u8 bluetooth_base_uuid[] = {
+                       0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
+                       0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u8 get_uuid_size(const u8 *uuid)
+{
+       u32 val;
+
+       if (memcmp(uuid, bluetooth_base_uuid, 12))
+               return 128;
+
+       val = get_unaligned_le32(&uuid[12]);
+       if (val > 0xffff)
+               return 32;
+
+       return 16;
+}
+
 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_cp_add_uuid *cp = data;
@@ -1300,8 +1376,9 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        memcpy(uuid->uuid, cp->uuid, 16);
        uuid->svc_hint = cp->svc_hint;
+       uuid->size = get_uuid_size(cp->uuid);
 
-       list_add(&uuid->list, &hdev->uuids);
+       list_add_tail(&uuid->list, &hdev->uuids);
 
        err = update_class(hdev);
        if (err < 0)
@@ -1332,7 +1409,8 @@ static bool enable_service_cache(struct hci_dev *hdev)
                return false;
 
        if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
-               schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
+               queue_delayed_work(hdev->workqueue, &hdev->service_cache,
+                                  CACHE_TIMEOUT);
                return true;
        }
 
@@ -1344,7 +1422,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_cp_remove_uuid *cp = data;
        struct pending_cmd *cmd;
-       struct list_head *p, *n;
+       struct bt_uuid *match, *tmp;
        u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        int err, found;
 
@@ -1372,9 +1450,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
 
        found = 0;
 
-       list_for_each_safe(p, n, &hdev->uuids) {
-               struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
-
+       list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
                if (memcmp(match->uuid, cp->uuid, 16) != 0)
                        continue;
 
@@ -1422,13 +1498,19 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("request for %s", hdev->name);
 
-       hci_dev_lock(hdev);
+       if (!lmp_bredr_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
 
-       if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
-                                MGMT_STATUS_BUSY);
-               goto unlock;
-       }
+       if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+                                 MGMT_STATUS_BUSY);
+
+       if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
 
        hdev->major_class = cp->major;
        hdev->minor_class = cp->minor;
@@ -1483,9 +1565,21 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                                  MGMT_STATUS_INVALID_PARAMS);
        }
 
+       if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
               key_count);
 
+       for (i = 0; i < key_count; i++) {
+               struct mgmt_link_key_info *key = &cp->keys[i];
+
+               if (key->addr.type != BDADDR_BREDR)
+                       return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                         MGMT_STATUS_INVALID_PARAMS);
+       }
+
        hci_dev_lock(hdev);
 
        hci_link_keys_clear(hdev);
@@ -1533,12 +1627,22 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        struct hci_conn *conn;
        int err;
 
-       hci_dev_lock(hdev);
-
        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;
 
+       if (!bdaddr_type_is_valid(cp->addr.type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &rp, sizeof(rp));
+
+       if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
+               return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &rp, sizeof(rp));
+
+       hci_dev_lock(hdev);
+
        if (!hdev_is_powered(hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
@@ -1596,6 +1700,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
                      u16 len)
 {
        struct mgmt_cp_disconnect *cp = data;
+       struct mgmt_rp_disconnect rp;
        struct hci_cp_disconnect dc;
        struct pending_cmd *cmd;
        struct hci_conn *conn;
@@ -1603,17 +1708,26 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("");
 
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+       rp.addr.type = cp->addr.type;
+
+       if (!bdaddr_type_is_valid(cp->addr.type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &rp, sizeof(rp));
+
        hci_dev_lock(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                MGMT_STATUS_NOT_POWERED);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
                goto failed;
        }
 
        if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                MGMT_STATUS_BUSY);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                  MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto failed;
        }
 
@@ -1624,8 +1738,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
        if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                MGMT_STATUS_NOT_CONNECTED);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                  MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
                goto failed;
        }
 
@@ -1903,11 +2017,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("");
 
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+       rp.addr.type = cp->addr.type;
+
+       if (!bdaddr_type_is_valid(cp->addr.type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &rp, sizeof(rp));
+
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-                                MGMT_STATUS_NOT_POWERED);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
                goto unlock;
        }
 
@@ -1924,10 +2047,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
                                   cp->addr.type, sec_level, auth_type);
 
-       memset(&rp, 0, sizeof(rp));
-       bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
-       rp.addr.type = cp->addr.type;
-
        if (IS_ERR(conn)) {
                int status;
 
@@ -2254,24 +2373,16 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                  MGMT_STATUS_NOT_POWERED, &cp->addr,
-                                  sizeof(cp->addr));
-               goto unlock;
-       }
-
        err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
                                      cp->randomizer);
        if (err < 0)
                status = MGMT_STATUS_FAILED;
        else
-               status = 0;
+               status = MGMT_STATUS_SUCCESS;
 
        err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
                           &cp->addr, sizeof(cp->addr));
 
-unlock:
        hci_dev_unlock(hdev);
        return err;
 }
@@ -2287,24 +2398,15 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id,
-                                  MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                  MGMT_STATUS_NOT_POWERED, &cp->addr,
-                                  sizeof(cp->addr));
-               goto unlock;
-       }
-
        err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
        if (err < 0)
                status = MGMT_STATUS_INVALID_PARAMS;
        else
-               status = 0;
+               status = MGMT_STATUS_SUCCESS;
 
        err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
                           status, &cp->addr, sizeof(cp->addr));
 
-unlock:
        hci_dev_unlock(hdev);
        return err;
 }
@@ -2365,31 +2467,45 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
 
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
-               if (lmp_bredr_capable(hdev))
-                       err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
-               else
-                       err = -ENOTSUPP;
+               if (!lmp_bredr_capable(hdev)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                        MGMT_STATUS_NOT_SUPPORTED);
+                       mgmt_pending_remove(cmd);
+                       goto failed;
+               }
+
+               err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
                break;
 
        case DISCOV_TYPE_LE:
-               if (lmp_host_le_capable(hdev))
-                       err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
-                                         LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
-               else
-                       err = -ENOTSUPP;
+               if (!lmp_host_le_capable(hdev)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                        MGMT_STATUS_NOT_SUPPORTED);
+                       mgmt_pending_remove(cmd);
+                       goto failed;
+               }
+
+               err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
+                                 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
                break;
 
        case DISCOV_TYPE_INTERLEAVED:
-               if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
-                       err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
-                                         LE_SCAN_WIN,
-                                         LE_SCAN_TIMEOUT_BREDR_LE);
-               else
-                       err = -ENOTSUPP;
+               if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                        MGMT_STATUS_NOT_SUPPORTED);
+                       mgmt_pending_remove(cmd);
+                       goto failed;
+               }
+
+               err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
+                                 LE_SCAN_TIMEOUT_BREDR_LE);
                break;
 
        default:
-               err = -EINVAL;
+               err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                MGMT_STATUS_INVALID_PARAMS);
+               mgmt_pending_remove(cmd);
+               goto failed;
        }
 
        if (err < 0)
@@ -2510,7 +2626,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
                hci_inquiry_cache_update_resolve(hdev, e);
        }
 
-       err = 0;
+       err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
+                          sizeof(cp->addr));
 
 failed:
        hci_dev_unlock(hdev);
@@ -2526,13 +2643,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("%s", hdev->name);
 
+       if (!bdaddr_type_is_valid(cp->addr.type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &cp->addr, sizeof(cp->addr));
+
        hci_dev_lock(hdev);
 
        err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
        if (err < 0)
                status = MGMT_STATUS_FAILED;
        else
-               status = 0;
+               status = MGMT_STATUS_SUCCESS;
 
        err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
                           &cp->addr, sizeof(cp->addr));
@@ -2551,13 +2673,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("%s", hdev->name);
 
+       if (!bdaddr_type_is_valid(cp->addr.type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &cp->addr, sizeof(cp->addr));
+
        hci_dev_lock(hdev);
 
        err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
        if (err < 0)
                status = MGMT_STATUS_INVALID_PARAMS;
        else
-               status = 0;
+               status = MGMT_STATUS_SUCCESS;
 
        err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
                           &cp->addr, sizeof(cp->addr));
@@ -2612,6 +2739,10 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
        if (!hdev_is_powered(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_NOT_POWERED);
@@ -2659,12 +2790,23 @@ done:
        return err;
 }
 
+static bool ltk_is_valid(struct mgmt_ltk_info *key)
+{
+       if (key->authenticated != 0x00 && key->authenticated != 0x01)
+               return false;
+       if (key->master != 0x00 && key->master != 0x01)
+               return false;
+       if (!bdaddr_type_is_le(key->addr.type))
+               return false;
+       return true;
+}
+
 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                               void *cp_data, u16 len)
 {
        struct mgmt_cp_load_long_term_keys *cp = cp_data;
        u16 key_count, expected_len;
-       int i;
+       int i, err;
 
        key_count = __le16_to_cpu(cp->key_count);
 
@@ -2674,11 +2816,20 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                BT_ERR("load_keys: expected %u bytes, got %u bytes",
                       len, expected_len);
                return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
-                                 EINVAL);
+                                 MGMT_STATUS_INVALID_PARAMS);
        }
 
        BT_DBG("%s key_count %u", hdev->name, key_count);
 
+       for (i = 0; i < key_count; i++) {
+               struct mgmt_ltk_info *key = &cp->keys[i];
+
+               if (!ltk_is_valid(key))
+                       return cmd_status(sk, hdev->id,
+                                         MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                         MGMT_STATUS_INVALID_PARAMS);
+       }
+
        hci_dev_lock(hdev);
 
        hci_smp_ltks_clear(hdev);
@@ -2698,9 +2849,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                            key->enc_size, key->ediv, key->rand);
        }
 
+       err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
+                          NULL, 0);
+
        hci_dev_unlock(hdev);
 
-       return 0;
+       return err;
 }
 
 static const struct mgmt_handler {
@@ -2915,6 +3069,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
 
        if (powered) {
+               u8 link_sec;
+
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
                    !lmp_host_ssp_capable(hdev)) {
                        u8 ssp = 1;
@@ -2938,6 +3094,11 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
                                             sizeof(cp), &cp);
                }
 
+               link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+               if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
+                       hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
+                                    sizeof(link_sec), &link_sec);
+
                if (lmp_bredr_capable(hdev)) {
                        set_bredr_scan(hdev);
                        update_class(hdev);
@@ -2946,7 +3107,13 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
                }
        } else {
                u8 status = MGMT_STATUS_NOT_POWERED;
+               u8 zero_cod[] = { 0, 0, 0 };
+
                mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+
+               if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
+                       mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+                                  zero_cod, sizeof(zero_cod), NULL);
        }
 
        err = new_settings(hdev, match.sk);
index 57f250c..b5178d6 100644 (file)
@@ -900,8 +900,6 @@ static void sco_conn_ready(struct sco_conn *conn)
 
        BT_DBG("conn %p", conn);
 
-       sco_conn_lock(conn);
-
        if (sk) {
                sco_sock_clear_timer(sk);
                bh_lock_sock(sk);
@@ -909,9 +907,13 @@ static void sco_conn_ready(struct sco_conn *conn)
                sk->sk_state_change(sk);
                bh_unlock_sock(sk);
        } else {
+               sco_conn_lock(conn);
+
                parent = sco_get_sock_listen(conn->src);
-               if (!parent)
-                       goto done;
+               if (!parent) {
+                       sco_conn_unlock(conn);
+                       return;
+               }
 
                bh_lock_sock(parent);
 
@@ -919,7 +921,8 @@ static void sco_conn_ready(struct sco_conn *conn)
                                    BTPROTO_SCO, GFP_ATOMIC);
                if (!sk) {
                        bh_unlock_sock(parent);
-                       goto done;
+                       sco_conn_unlock(conn);
+                       return;
                }
 
                sco_sock_init(sk, parent);
@@ -939,10 +942,9 @@ static void sco_conn_ready(struct sco_conn *conn)
                parent->sk_data_ready(parent, 1);
 
                bh_unlock_sock(parent);
-       }
 
-done:
-       sco_conn_unlock(conn);
+               sco_conn_unlock(conn);
+       }
 }
 
 /* ----- SCO interface with lower layer (HCI) ----- */
index 68a9587..5abefb1 100644 (file)
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
        skb_pull(skb, sizeof(code));
 
+       /*
+        * The SMP context must be initialized for all other PDUs except
+        * pairing and security requests. If we get any other PDU when
+        * not initialized simply disconnect (done if this function
+        * returns an error).
+        */
+       if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+           !conn->smp_chan) {
+               BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+               kfree_skb(skb);
+               return -ENOTSUPP;
+       }
+
        switch (code) {
        case SMP_CMD_PAIRING_REQ:
                reason = smp_cmd_pairing_req(conn, skb);
index 6dee7bf..aa0d3b2 100644 (file)
@@ -46,3 +46,17 @@ config BRIDGE_IGMP_SNOOPING
          Say N to exclude this support and reduce the binary size.
 
          If unsure, say Y.
+
+config BRIDGE_VLAN_FILTERING
+       bool "VLAN filtering"
+       depends on BRIDGE
+       depends on VLAN_8021Q
+       default n
+       ---help---
+         If you say Y here, then the Ethernet bridge will be able to selectively
+         receive and forward traffic based on VLAN information in the packet
+         and any VLAN information configured on the bridge port or bridge device.
+
+         Say N to exclude this support and reduce the binary size.
+
+         If unsure, say Y.
index e859098..e85498b 100644 (file)
@@ -14,4 +14,6 @@ bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
 
 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
 
+bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
+
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
index e1bc090..d5f1d3f 100644 (file)
@@ -30,6 +30,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_bridge_fdb_entry *dst;
        struct net_bridge_mdb_entry *mdst;
        struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+       u16 vid = 0;
 
        rcu_read_lock();
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -45,6 +46,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        brstats->tx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);
 
+       if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+               goto out;
+
        BR_INPUT_SKB_CB(skb)->brdev = dev;
 
        skb_reset_mac_header(skb);
@@ -67,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                        br_multicast_deliver(mdst, skb);
                else
                        br_flood_deliver(br, skb);
-       } else if ((dst = __br_fdb_get(br, dest)) != NULL)
+       } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
                br_deliver(dst->dst, skb);
        else
                br_flood_deliver(br, skb);
@@ -176,7 +180,6 @@ static int br_set_mac_address(struct net_device *dev, void *p)
                br_fdb_change_mac_address(br, addr->sa_data);
                br_stp_change_bridge_id(br, addr->sa_data);
        }
-       br->flags |= BR_SET_MAC_ADDR;
        spin_unlock_bh(&br->lock);
 
        return 0;
@@ -266,7 +269,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
        p->np = NULL;
 
-       __netpoll_free_rcu(np);
+       __netpoll_free_async(np);
 }
 
 #endif
@@ -314,6 +317,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_fdb_dump            = br_fdb_dump,
        .ndo_bridge_getlink      = br_getlink,
        .ndo_bridge_setlink      = br_setlink,
+       .ndo_bridge_dellink      = br_dellink,
 };
 
 static void br_dev_free(struct net_device *dev)
index d9576e6..8117900 100644 (file)
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <asm/unaligned.h>
+#include <linux/if_vlan.h>
 #include "br_private.h"
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
-                     const unsigned char *addr);
+                     const unsigned char *addr, u16 vid);
 static void fdb_notify(struct net_bridge *br,
                       const struct net_bridge_fdb_entry *, int);
 
@@ -67,11 +68,11 @@ static inline int has_expired(const struct net_bridge *br,
                time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
 
-static inline int br_mac_hash(const unsigned char *mac)
+static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
 {
-       /* use 1 byte of OUI cnd 3 bytes of NIC */
+       /* use 1 byte of OUI and 3 bytes of NIC */
        u32 key = get_unaligned((u32 *)(mac + 2));
-       return jhash_1word(key, fdb_salt) & (BR_HASH_SIZE - 1);
+       return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
 }
 
 static void fdb_rcu_free(struct rcu_head *head)
@@ -91,6 +92,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
        struct net_bridge *br = p->br;
+       bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
        int i;
 
        spin_lock_bh(&br->hash_lock);
@@ -105,10 +107,12 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
                        if (f->dst == p && f->is_local) {
                                /* maybe another port has same hw addr? */
                                struct net_bridge_port *op;
+                               u16 vid = f->vlan_id;
                                list_for_each_entry(op, &br->port_list, list) {
                                        if (op != p &&
                                            ether_addr_equal(op->dev->dev_addr,
-                                                            f->addr.addr)) {
+                                                            f->addr.addr) &&
+                                           nbp_vlan_find(op, vid)) {
                                                f->dst = op;
                                                goto insert;
                                        }
@@ -116,27 +120,55 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 
                                /* delete old one */
                                fdb_delete(br, f);
-                               goto insert;
+insert:
+                               /* insert new address; may fail if the
+                                * address is invalid or a duplicate.
+                                */
+                               fdb_insert(br, p, newaddr, vid);
+
+                               /* if this port has no vlan information
+                                * configured, we can safely be done at
+                                * this point.
+                                */
+                               if (no_vlan)
+                                       goto done;
                        }
                }
        }
- insert:
-       /* insert new address,  may fail if invalid address or dup. */
-       fdb_insert(br, p, newaddr);
 
+done:
        spin_unlock_bh(&br->hash_lock);
 }
 
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 {
        struct net_bridge_fdb_entry *f;
+       struct net_port_vlans *pv;
+       u16 vid = 0;
 
        /* If old entry was unassociated with any port, then delete it. */
-       f = __br_fdb_get(br, br->dev->dev_addr);
+       f = __br_fdb_get(br, br->dev->dev_addr, 0);
        if (f && f->is_local && !f->dst)
                fdb_delete(br, f);
 
-       fdb_insert(br, NULL, newaddr);
+       fdb_insert(br, NULL, newaddr, 0);
+
+       /* Now remove and add entries for every VLAN configured on the
+        * bridge.  This function runs under RTNL so the bitmap will not
+        * change from under us.
+        */
+       pv = br_get_vlan_info(br);
+       if (!pv)
+               return;
+
+       for (vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid);
+            vid < BR_VLAN_BITMAP_LEN;
+            vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid+1)) {
+               f = __br_fdb_get(br, br->dev->dev_addr, vid);
+               if (f && f->is_local && !f->dst)
+                       fdb_delete(br, f);
+               fdb_insert(br, NULL, newaddr, vid);
+       }
 }
 
 void br_fdb_cleanup(unsigned long _data)
@@ -231,13 +263,16 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 
 /* No locking or refcounting, assumes caller has rcu_read_lock */
 struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-                                         const unsigned char *addr)
+                                         const unsigned char *addr,
+                                         __u16 vid)
 {
        struct hlist_node *h;
        struct net_bridge_fdb_entry *fdb;
 
-       hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
-               if (ether_addr_equal(fdb->addr.addr, addr)) {
+       hlist_for_each_entry_rcu(fdb, h,
+                               &br->hash[br_mac_hash(addr, vid)], hlist) {
+               if (ether_addr_equal(fdb->addr.addr, addr) &&
+                   fdb->vlan_id == vid) {
                        if (unlikely(has_expired(br, fdb)))
                                break;
                        return fdb;
@@ -261,7 +296,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
        if (!port)
                ret = 0;
        else {
-               fdb = __br_fdb_get(port->br, addr);
+               fdb = __br_fdb_get(port->br, addr, 0);
                ret = fdb && fdb->dst && fdb->dst->dev != dev &&
                        fdb->dst->state == BR_STATE_FORWARDING;
        }
@@ -325,26 +360,30 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 }
 
 static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
-                                            const unsigned char *addr)
+                                            const unsigned char *addr,
+                                            __u16 vid)
 {
        struct hlist_node *h;
        struct net_bridge_fdb_entry *fdb;
 
        hlist_for_each_entry(fdb, h, head, hlist) {
-               if (ether_addr_equal(fdb->addr.addr, addr))
+               if (ether_addr_equal(fdb->addr.addr, addr) &&
+                   fdb->vlan_id == vid)
                        return fdb;
        }
        return NULL;
 }
 
 static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
-                                                const unsigned char *addr)
+                                                const unsigned char *addr,
+                                                __u16 vid)
 {
        struct hlist_node *h;
        struct net_bridge_fdb_entry *fdb;
 
        hlist_for_each_entry_rcu(fdb, h, head, hlist) {
-               if (ether_addr_equal(fdb->addr.addr, addr))
+               if (ether_addr_equal(fdb->addr.addr, addr) &&
+                   fdb->vlan_id == vid)
                        return fdb;
        }
        return NULL;
@@ -352,7 +391,8 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
 
 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                                               struct net_bridge_port *source,
-                                              const unsigned char *addr)
+                                              const unsigned char *addr,
+                                              __u16 vid)
 {
        struct net_bridge_fdb_entry *fdb;
 
@@ -360,6 +400,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
        if (fdb) {
                memcpy(fdb->addr.addr, addr, ETH_ALEN);
                fdb->dst = source;
+               fdb->vlan_id = vid;
                fdb->is_local = 0;
                fdb->is_static = 0;
                fdb->updated = fdb->used = jiffies;
@@ -369,15 +410,15 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 }
 
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
-                 const unsigned char *addr)
+                 const unsigned char *addr, u16 vid)
 {
-       struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *fdb;
 
        if (!is_valid_ether_addr(addr))
                return -EINVAL;
 
-       fdb = fdb_find(head, addr);
+       fdb = fdb_find(head, addr, vid);
        if (fdb) {
                /* it is okay to have multiple ports with same
                 * address, just use the first one.
@@ -390,7 +431,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                fdb_delete(br, fdb);
        }
 
-       fdb = fdb_create(head, source, addr);
+       fdb = fdb_create(head, source, addr, vid);
        if (!fdb)
                return -ENOMEM;
 
@@ -401,20 +442,20 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 
 /* Add entry for local address of interface */
 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
-                 const unsigned char *addr)
+                 const unsigned char *addr, u16 vid)
 {
        int ret;
 
        spin_lock_bh(&br->hash_lock);
-       ret = fdb_insert(br, source, addr);
+       ret = fdb_insert(br, source, addr, vid);
        spin_unlock_bh(&br->hash_lock);
        return ret;
 }
 
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-                  const unsigned char *addr)
+                  const unsigned char *addr, u16 vid)
 {
-       struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *fdb;
 
        /* some users want to always flood. */
@@ -426,7 +467,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
              source->state == BR_STATE_FORWARDING))
                return;
 
-       fdb = fdb_find_rcu(head, addr);
+       fdb = fdb_find_rcu(head, addr, vid);
        if (likely(fdb)) {
                /* attempt to update an entry for a local interface */
                if (unlikely(fdb->is_local)) {
@@ -441,8 +482,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                }
        } else {
                spin_lock(&br->hash_lock);
-               if (likely(!fdb_find(head, addr))) {
-                       fdb = fdb_create(head, source, addr);
+               if (likely(!fdb_find(head, addr, vid))) {
+                       fdb = fdb_create(head, source, addr, vid);
                        if (fdb)
                                fdb_notify(br, fdb, RTM_NEWNEIGH);
                }
@@ -495,6 +536,10 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        ci.ndm_refcnt    = 0;
        if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
+
+       if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+               goto nla_put_failure;
+
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -506,6 +551,7 @@ static inline size_t fdb_nlmsg_size(void)
 {
        return NLMSG_ALIGN(sizeof(struct ndmsg))
                + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+               + nla_total_size(sizeof(u16)) /* NDA_VLAN */
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
@@ -571,18 +617,18 @@ out:
 
 /* Update (create or replace) forwarding database entry */
 static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
-                        __u16 state, __u16 flags)
+                        __u16 state, __u16 flags, __u16 vid)
 {
        struct net_bridge *br = source->br;
-       struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *fdb;
 
-       fdb = fdb_find(head, addr);
+       fdb = fdb_find(head, addr, vid);
        if (fdb == NULL) {
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               fdb = fdb_create(head, source, addr);
+               fdb = fdb_create(head, source, addr, vid);
                if (!fdb)
                        return -ENOMEM;
                fdb_notify(br, fdb, RTM_NEWNEIGH);
@@ -607,6 +653,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
        return 0;
 }
 
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
+              const unsigned char *addr, u16 nlh_flags, u16 vid)
+{
+       int err = 0;
+
+       if (ndm->ndm_flags & NTF_USE) {
+               rcu_read_lock();
+               br_fdb_update(p->br, p, addr, vid);
+               rcu_read_unlock();
+       } else {
+               spin_lock_bh(&p->br->hash_lock);
+               err = fdb_add_entry(p, addr, ndm->ndm_state,
+                                   nlh_flags, vid);
+               spin_unlock_bh(&p->br->hash_lock);
+       }
+
+       return err;
+}
+
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
 int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
               struct net_device *dev,
@@ -614,12 +679,29 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 {
        struct net_bridge_port *p;
        int err = 0;
+       struct net_port_vlans *pv;
+       unsigned short vid = VLAN_N_VID;
 
        if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
                pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
                return -EINVAL;
        }
 
+       if (tb[NDA_VLAN]) {
+               if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
+                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
+                       return -EINVAL;
+               }
+
+               vid = nla_get_u16(tb[NDA_VLAN]);
+
+               if (vid >= VLAN_N_VID) {
+                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
+                               vid);
+                       return -EINVAL;
+               }
+       }
+
        p = br_port_get_rtnl(dev);
        if (p == NULL) {
                pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -627,40 +709,90 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                return -EINVAL;
        }
 
-       if (ndm->ndm_flags & NTF_USE) {
-               rcu_read_lock();
-               br_fdb_update(p->br, p, addr);
-               rcu_read_unlock();
+       pv = nbp_get_vlan_info(p);
+       if (vid != VLAN_N_VID) {
+               if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
+                       pr_info("bridge: RTM_NEWNEIGH with unconfigured "
+                               "vlan %d on port %s\n", vid, dev->name);
+                       return -EINVAL;
+               }
+
+               /* VID was specified, so use it. */
+               err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
        } else {
-               spin_lock_bh(&p->br->hash_lock);
-               err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags);
-               spin_unlock_bh(&p->br->hash_lock);
+               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+                       err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+                       goto out;
+               }
+
+               /* We have vlans configured on this port and user didn't
+                * specify a VLAN.  To be nice, add/update entry for every
+                * vlan on this port.
+                */
+               vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+               while (vid < BR_VLAN_BITMAP_LEN) {
+                       err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+                       if (err)
+                               goto out;
+                       vid = find_next_bit(pv->vlan_bitmap,
+                                           BR_VLAN_BITMAP_LEN, vid+1);
+               }
        }
 
+out:
        return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
+int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
+                      u16 vlan)
 {
-       struct net_bridge *br = p->br;
-       struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+       struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
        struct net_bridge_fdb_entry *fdb;
 
-       fdb = fdb_find(head, addr);
+       fdb = fdb_find(head, addr, vlan);
        if (!fdb)
                return -ENOENT;
 
-       fdb_delete(p->br, fdb);
+       fdb_delete(br, fdb);
        return 0;
 }
 
+static int __br_fdb_delete(struct net_bridge_port *p,
+                          const unsigned char *addr, u16 vid)
+{
+       int err;
+
+       spin_lock_bh(&p->br->hash_lock);
+       err = fdb_delete_by_addr(p->br, addr, vid);
+       spin_unlock_bh(&p->br->hash_lock);
+
+       return err;
+}
+
 /* Remove neighbor entry with RTM_DELNEIGH */
-int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+                 struct net_device *dev,
                  const unsigned char *addr)
 {
        struct net_bridge_port *p;
        int err;
+       struct net_port_vlans *pv;
+       unsigned short vid = VLAN_N_VID;
+
+       if (tb[NDA_VLAN]) {
+               if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
+                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
+                       return -EINVAL;
+               }
+
+               vid = nla_get_u16(tb[NDA_VLAN]);
 
+               if (vid >= VLAN_N_VID) {
+                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
+                               vid);
+                       return -EINVAL;
+               }
+       }
        p = br_port_get_rtnl(dev);
        if (p == NULL) {
                pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
@@ -668,9 +800,33 @@ int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
                return -EINVAL;
        }
 
-       spin_lock_bh(&p->br->hash_lock);
-       err = fdb_delete_by_addr(p, addr);
-       spin_unlock_bh(&p->br->hash_lock);
+       pv = nbp_get_vlan_info(p);
+       if (vid != VLAN_N_VID) {
+               if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
+                       pr_info("bridge: RTM_DELNEIGH with unconfigured "
+                               "vlan %d on port %s\n", vid, dev->name);
+                       return -EINVAL;
+               }
+
+               err = __br_fdb_delete(p, addr, vid);
+       } else {
+               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+                       err = __br_fdb_delete(p, addr, 0);
+                       goto out;
+               }
 
+               /* We have vlans configured on this port and user didn't
+                * specify a VLAN.  To be nice, add/update entry for every
+                * vlan on this port.
+                */
+               err = -ENOENT;
+               vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+               while (vid < BR_VLAN_BITMAP_LEN) {
+                       err &= __br_fdb_delete(p, addr, vid);
+                       vid = find_next_bit(pv->vlan_bitmap,
+                                           BR_VLAN_BITMAP_LEN, vid+1);
+               }
+       }
+out:
        return err;
 }
index 02015a5..092b20e 100644 (file)
@@ -31,6 +31,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
 {
        return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+               br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
                p->state == BR_STATE_FORWARDING);
 }
 
@@ -63,6 +64,10 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
+       skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+       if (!skb)
+               return;
+
        skb->dev = to->dev;
 
        if (unlikely(netpoll_tx_running(to->br->dev))) {
@@ -88,6 +93,10 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
                return;
        }
 
+       skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+       if (!skb)
+               return;
+
        indev = skb->dev;
        skb->dev = to->dev;
        skb_forward_csum(skb);
index 2148d47..ef1b914 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/if_ether.h>
 #include <linux/slab.h>
 #include <net/sock.h>
+#include <linux/if_vlan.h>
 
 #include "br_private.h"
 
@@ -139,6 +140,7 @@ static void del_nbp(struct net_bridge_port *p)
 
        br_ifinfo_notify(RTM_DELLINK, p);
 
+       nbp_vlan_flush(p);
        br_fdb_delete_by_port(br, p, 1);
 
        list_del_rcu(&p->list);
@@ -395,7 +397,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        dev_set_mtu(br->dev, br_min_mtu(br));
 
-       if (br_fdb_insert(br, p, dev->dev_addr))
+       if (br_fdb_insert(br, p, dev->dev_addr, 0))
                netdev_err(dev, "failed insert local address bridge forwarding table\n");
 
        kobject_uevent(&p->kobj, KOBJ_ADD);
index 4b34207..4803301 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/etherdevice.h>
 #include <linux/netfilter_bridge.h>
 #include <linux/export.h>
+#include <linux/rculist.h>
 #include "br_private.h"
 
 /* Hook for brouter */
@@ -34,6 +35,20 @@ static int br_pass_frame_up(struct sk_buff *skb)
        brstats->rx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);
 
+       /* Bridge is just like any other port.  Make sure the
+        * packet is allowed except in promisc modue when someone
+        * may be running packet capture.
+        */
+       if (!(brdev->flags & IFF_PROMISC) &&
+           !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
+       if (!skb)
+               return NET_RX_DROP;
+
        indev = skb->dev;
        skb->dev = brdev;
 
@@ -50,13 +65,17 @@ int br_handle_frame_finish(struct sk_buff *skb)
        struct net_bridge_fdb_entry *dst;
        struct net_bridge_mdb_entry *mdst;
        struct sk_buff *skb2;
+       u16 vid = 0;
 
        if (!p || p->state == BR_STATE_DISABLED)
                goto drop;
 
+       if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
+               goto drop;
+
        /* insert into forwarding database after filtering to avoid spoofing */
        br = p->br;
-       br_fdb_update(br, p, eth_hdr(skb)->h_source);
+       br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
 
        if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
            br_multicast_rcv(br, p, skb))
@@ -91,7 +110,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
                        skb2 = skb;
 
                br->dev->stats.multicast++;
-       } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
+       } else if ((dst = __br_fdb_get(br, dest, vid)) &&
+                       dst->is_local) {
                skb2 = skb;
                /* Do not forward the packet since it's local. */
                skb = NULL;
@@ -119,8 +139,10 @@ drop:
 static int br_handle_local_finish(struct sk_buff *skb)
 {
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+       u16 vid = 0;
 
-       br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+       br_vlan_get_tag(skb, &vid);
+       br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
        return 0;        /* process further */
 }
 
index 6d6f265..7d886b0 100644 (file)
@@ -39,6 +39,8 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
 {
        if (a->proto != b->proto)
                return 0;
+       if (a->vid != b->vid)
+               return 0;
        switch (a->proto) {
        case htons(ETH_P_IP):
                return a->u.ip4 == b->u.ip4;
@@ -50,16 +52,19 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
        return 0;
 }
 
-static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
+                               __u16 vid)
 {
-       return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
+       return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
-                               const struct in6_addr *ip)
+                               const struct in6_addr *ip,
+                               __u16 vid)
 {
-       return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1);
+       return jhash_2words(ipv6_addr_hash(ip), vid,
+                           mdb->secret) & (mdb->max - 1);
 }
 #endif
 
@@ -68,10 +73,10 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
 {
        switch (ip->proto) {
        case htons(ETH_P_IP):
-               return __br_ip4_hash(mdb, ip->u.ip4);
+               return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
 #if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
-               return __br_ip6_hash(mdb, &ip->u.ip6);
+               return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
 #endif
        }
        return 0;
@@ -101,24 +106,27 @@ struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
 }
 
 static struct net_bridge_mdb_entry *br_mdb_ip4_get(
-       struct net_bridge_mdb_htable *mdb, __be32 dst)
+       struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
 {
        struct br_ip br_dst;
 
        br_dst.u.ip4 = dst;
        br_dst.proto = htons(ETH_P_IP);
+       br_dst.vid = vid;
 
        return br_mdb_ip_get(mdb, &br_dst);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static struct net_bridge_mdb_entry *br_mdb_ip6_get(
-       struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
+       struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
+       __u16 vid)
 {
        struct br_ip br_dst;
 
        br_dst.u.ip6 = *dst;
        br_dst.proto = htons(ETH_P_IPV6);
+       br_dst.vid = vid;
 
        return br_mdb_ip_get(mdb, &br_dst);
 }
@@ -694,7 +702,8 @@ err:
 
 static int br_ip4_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
-                                     __be32 group)
+                                     __be32 group,
+                                     __u16 vid)
 {
        struct br_ip br_group;
 
@@ -703,6 +712,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
 
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
+       br_group.vid = vid;
 
        return br_multicast_add_group(br, port, &br_group);
 }
@@ -710,7 +720,8 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
 #if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
-                                     const struct in6_addr *group)
+                                     const struct in6_addr *group,
+                                     __u16 vid)
 {
        struct br_ip br_group;
 
@@ -719,6 +730,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 
        br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
+       br_group.vid = vid;
 
        return br_multicast_add_group(br, port, &br_group);
 }
@@ -895,10 +907,12 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        int type;
        int err = 0;
        __be32 group;
+       u16 vid = 0;
 
        if (!pskb_may_pull(skb, sizeof(*ih)))
                return -EINVAL;
 
+       br_vlan_get_tag(skb, &vid);
        ih = igmpv3_report_hdr(skb);
        num = ntohs(ih->ngrec);
        len = sizeof(*ih);
@@ -930,7 +944,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                        continue;
                }
 
-               err = br_ip4_multicast_add_group(br, port, group);
+               err = br_ip4_multicast_add_group(br, port, group, vid);
                if (err)
                        break;
        }
@@ -949,10 +963,12 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
        int len;
        int num;
        int err = 0;
+       u16 vid = 0;
 
        if (!pskb_may_pull(skb, sizeof(*icmp6h)))
                return -EINVAL;
 
+       br_vlan_get_tag(skb, &vid);
        icmp6h = icmp6_hdr(skb);
        num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
        len = sizeof(*icmp6h);
@@ -990,7 +1006,8 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                        continue;
                }
 
-               err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
+               err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
+                                                vid);
                if (!err)
                        break;
        }
@@ -1074,6 +1091,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        unsigned long now = jiffies;
        __be32 group;
        int err = 0;
+       u16 vid = 0;
 
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) ||
@@ -1108,7 +1126,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        if (!group)
                goto out;
 
-       mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
+       br_vlan_get_tag(skb, &vid);
+       mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
        if (!mp)
                goto out;
 
@@ -1149,6 +1168,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        unsigned long now = jiffies;
        const struct in6_addr *group = NULL;
        int err = 0;
+       u16 vid = 0;
 
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) ||
@@ -1180,7 +1200,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        if (!group)
                goto out;
 
-       mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
+       br_vlan_get_tag(skb, &vid);
+       mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
        if (!mp)
                goto out;
 
@@ -1286,7 +1307,8 @@ out:
 
 static void br_ip4_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
-                                        __be32 group)
+                                        __be32 group,
+                                        __u16 vid)
 {
        struct br_ip br_group;
 
@@ -1295,6 +1317,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
+       br_group.vid = vid;
 
        br_multicast_leave_group(br, port, &br_group);
 }
@@ -1302,7 +1325,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 #if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
-                                        const struct in6_addr *group)
+                                        const struct in6_addr *group,
+                                        __u16 vid)
 {
        struct br_ip br_group;
 
@@ -1311,6 +1335,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 
        br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
+       br_group.vid = vid;
 
        br_multicast_leave_group(br, port, &br_group);
 }
@@ -1326,6 +1351,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
        unsigned int len;
        unsigned int offset;
        int err;
+       u16 vid = 0;
 
        /* We treat OOM as packet loss for now. */
        if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -1386,6 +1412,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 
        err = 0;
 
+       br_vlan_get_tag(skb2, &vid);
        BR_INPUT_SKB_CB(skb)->igmp = 1;
        ih = igmp_hdr(skb2);
 
@@ -1393,7 +1420,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
                BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-               err = br_ip4_multicast_add_group(br, port, ih->group);
+               err = br_ip4_multicast_add_group(br, port, ih->group, vid);
                break;
        case IGMPV3_HOST_MEMBERSHIP_REPORT:
                err = br_ip4_multicast_igmp3_report(br, port, skb2);
@@ -1402,7 +1429,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                err = br_ip4_multicast_query(br, port, skb2);
                break;
        case IGMP_HOST_LEAVE_MESSAGE:
-               br_ip4_multicast_leave_group(br, port, ih->group);
+               br_ip4_multicast_leave_group(br, port, ih->group, vid);
                break;
        }
 
@@ -1427,6 +1454,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
        unsigned int len;
        int offset;
        int err;
+       u16 vid = 0;
 
        if (!pskb_may_pull(skb, sizeof(*ip6h)))
                return -EINVAL;
@@ -1510,6 +1538,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
        err = 0;
 
+       br_vlan_get_tag(skb, &vid);
        BR_INPUT_SKB_CB(skb)->igmp = 1;
 
        switch (icmp6_type) {
@@ -1522,7 +1551,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                }
                mld = (struct mld_msg *)skb_transport_header(skb2);
                BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-               err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
+               err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
                break;
            }
        case ICMPV6_MLD2_REPORT:
@@ -1539,7 +1568,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                        goto out;
                }
                mld = (struct mld_msg *)skb_transport_header(skb2);
-               br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
+               br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
            }
        }
 
index 39ca979..27aa3ee 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/rtnetlink.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
+#include <uapi/linux/if_bridge.h>
 
 #include "br_private.h"
 #include "br_private_stp.h"
@@ -64,15 +65,21 @@ static int br_port_fill_attrs(struct sk_buff *skb,
  * Create one netlink message for one interface
  * Contains port and master info as well as carrier and bridge state.
  */
-static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port,
-                         u32 pid, u32 seq, int event, unsigned int flags)
+static int br_fill_ifinfo(struct sk_buff *skb,
+                         const struct net_bridge_port *port,
+                         u32 pid, u32 seq, int event, unsigned int flags,
+                         u32 filter_mask, const struct net_device *dev)
 {
-       const struct net_bridge *br = port->br;
-       const struct net_device *dev = port->dev;
+       const struct net_bridge *br;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;
        u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
 
+       if (port)
+               br = port->br;
+       else
+               br = netdev_priv(dev);
+
        br_debug(br, "br_fill_info event %d port %s master %s\n",
                     event, dev->name, br->dev->name);
 
@@ -98,7 +105,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
             nla_put_u32(skb, IFLA_LINK, dev->iflink)))
                goto nla_put_failure;
 
-       if (event == RTM_NEWLINK) {
+       if (event == RTM_NEWLINK && port) {
                struct nlattr *nest
                        = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
 
@@ -107,6 +114,48 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
                nla_nest_end(skb, nest);
        }
 
+       /* Check if  the VID information is requested */
+       if (filter_mask & RTEXT_FILTER_BRVLAN) {
+               struct nlattr *af;
+               const struct net_port_vlans *pv;
+               struct bridge_vlan_info vinfo;
+               u16 vid;
+               u16 pvid;
+
+               if (port)
+                       pv = nbp_get_vlan_info(port);
+               else
+                       pv = br_get_vlan_info(br);
+
+               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+                       goto done;
+
+               af = nla_nest_start(skb, IFLA_AF_SPEC);
+               if (!af)
+                       goto nla_put_failure;
+
+               pvid = br_get_pvid(pv);
+               for (vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+                    vid < BR_VLAN_BITMAP_LEN;
+                    vid = find_next_bit(pv->vlan_bitmap,
+                                        BR_VLAN_BITMAP_LEN, vid+1)) {
+                       vinfo.vid = vid;
+                       vinfo.flags = 0;
+                       if (vid == pvid)
+                               vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
+
+                       if (test_bit(vid, pv->untagged_bitmap))
+                               vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+                       if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                                   sizeof(vinfo), &vinfo))
+                               goto nla_put_failure;
+               }
+
+               nla_nest_end(skb, af);
+       }
+
+done:
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -119,10 +168,14 @@ nla_put_failure:
  */
 void br_ifinfo_notify(int event, struct net_bridge_port *port)
 {
-       struct net *net = dev_net(port->dev);
+       struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;
 
+       if (!port)
+               return;
+
+       net = dev_net(port->dev);
        br_debug(port->br, "port %u(%s) event %d\n",
                 (unsigned int)port->port_no, port->dev->name, event);
 
@@ -130,7 +183,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
        if (skb == NULL)
                goto errout;
 
-       err = br_fill_ifinfo(skb, port, 0, 0, event, 0);
+       err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in br_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -144,24 +197,85 @@ errout:
                rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 }
 
+
 /*
  * Dump information about all ports, in response to GETLINK
  */
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-              struct net_device *dev)
+              struct net_device *dev, u32 filter_mask)
 {
        int err = 0;
        struct net_bridge_port *port = br_port_get_rcu(dev);
 
-       /* not a bridge port */
-       if (!port)
+       /* not a bridge port and  */
+       if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
                goto out;
 
-       err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI);
+       err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+                            filter_mask, dev);
 out:
        return err;
 }
 
+static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
+       [IFLA_BRIDGE_FLAGS]     = { .type = NLA_U16 },
+       [IFLA_BRIDGE_MODE]      = { .type = NLA_U16 },
+       [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
+                                   .len = sizeof(struct bridge_vlan_info), },
+};
+
+static int br_afspec(struct net_bridge *br,
+                    struct net_bridge_port *p,
+                    struct nlattr *af_spec,
+                    int cmd)
+{
+       struct nlattr *tb[IFLA_BRIDGE_MAX+1];
+       int err = 0;
+
+       err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
+       if (err)
+               return err;
+
+       if (tb[IFLA_BRIDGE_VLAN_INFO]) {
+               struct bridge_vlan_info *vinfo;
+
+               vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+
+               if (vinfo->vid >= VLAN_N_VID)
+                       return -EINVAL;
+
+               switch (cmd) {
+               case RTM_SETLINK:
+                       if (p) {
+                               err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+                               if (err)
+                                       break;
+
+                               if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+                                       err = br_vlan_add(p->br, vinfo->vid,
+                                                         vinfo->flags);
+                       } else
+                               err = br_vlan_add(br, vinfo->vid, vinfo->flags);
+
+                       if (err)
+                               break;
+
+                       break;
+
+               case RTM_DELLINK:
+                       if (p) {
+                               nbp_vlan_delete(p, vinfo->vid);
+                               if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+                                       br_vlan_delete(p->br, vinfo->vid);
+                       } else
+                               br_vlan_delete(br, vinfo->vid);
+                       break;
+               }
+       }
+
+       return err;
+}
+
 static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_STATE]     = { .type = NLA_U8 },
        [IFLA_BRPORT_COST]      = { .type = NLA_U32 },
@@ -241,6 +355,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 {
        struct ifinfomsg *ifm;
        struct nlattr *protinfo;
+       struct nlattr *afspec;
        struct net_bridge_port *p;
        struct nlattr *tb[IFLA_BRPORT_MAX + 1];
        int err;
@@ -248,38 +363,76 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
        ifm = nlmsg_data(nlh);
 
        protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
-       if (!protinfo)
+       afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
+       if (!protinfo && !afspec)
                return 0;
 
        p = br_port_get_rtnl(dev);
-       if (!p)
+       /* We want to accept dev as bridge itself if the AF_SPEC
+        * is set to see if someone is setting vlan info on the brigde
+        */
+       if (!p && ((dev->priv_flags & IFF_EBRIDGE) && !afspec))
                return -EINVAL;
 
-       if (protinfo->nla_type & NLA_F_NESTED) {
-               err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
-                                      protinfo, ifla_brport_policy);
+       if (p && protinfo) {
+               if (protinfo->nla_type & NLA_F_NESTED) {
+                       err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
+                                              protinfo, ifla_brport_policy);
+                       if (err)
+                               return err;
+
+                       spin_lock_bh(&p->br->lock);
+                       err = br_setport(p, tb);
+                       spin_unlock_bh(&p->br->lock);
+               } else {
+                       /* Binary compatability with old RSTP */
+                       if (nla_len(protinfo) < sizeof(u8))
+                               return -EINVAL;
+
+                       spin_lock_bh(&p->br->lock);
+                       err = br_set_port_state(p, nla_get_u8(protinfo));
+                       spin_unlock_bh(&p->br->lock);
+               }
                if (err)
-                       return err;
-
-               spin_lock_bh(&p->br->lock);
-               err = br_setport(p, tb);
-               spin_unlock_bh(&p->br->lock);
-       } else {
-               /* Binary compatability with old RSTP */
-               if (nla_len(protinfo) < sizeof(u8))
-                       return -EINVAL;
+                       goto out;
+       }
 
-               spin_lock_bh(&p->br->lock);
-               err = br_set_port_state(p, nla_get_u8(protinfo));
-               spin_unlock_bh(&p->br->lock);
+       if (afspec) {
+               err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
+                               afspec, RTM_SETLINK);
        }
 
        if (err == 0)
                br_ifinfo_notify(RTM_NEWLINK, p);
 
+out:
        return err;
 }
 
+/* Delete port information */
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
+{
+       struct ifinfomsg *ifm;
+       struct nlattr *afspec;
+       struct net_bridge_port *p;
+       int err;
+
+       ifm = nlmsg_data(nlh);
+
+       afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
+       if (!afspec)
+               return 0;
+
+       p = br_port_get_rtnl(dev);
+       /* We want to accept dev as bridge itself as well */
+       if (!p && !(dev->priv_flags & IFF_EBRIDGE))
+               return -EINVAL;
+
+       err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
+                       afspec, RTM_DELLINK);
+
+       return err;
+}
 static int br_validate(struct nlattr *tb[], struct nlattr *data[])
 {
        if (tb[IFLA_ADDRESS]) {
@@ -292,6 +445,29 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
+static size_t br_get_link_af_size(const struct net_device *dev)
+{
+       struct net_port_vlans *pv;
+
+       if (br_port_exists(dev))
+               pv = nbp_get_vlan_info(br_port_get_rcu(dev));
+       else if (dev->priv_flags & IFF_EBRIDGE)
+               pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
+       else
+               return 0;
+
+       if (!pv)
+               return 0;
+
+       /* Each VLAN is returned in bridge_vlan_info along with flags */
+       return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
+}
+
+static struct rtnl_af_ops br_af_ops = {
+       .family                 = AF_BRIDGE,
+       .get_link_af_size       = br_get_link_af_size,
+};
+
 struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind           = "bridge",
        .priv_size      = sizeof(struct net_bridge),
@@ -305,11 +481,18 @@ int __init br_netlink_init(void)
        int err;
 
        br_mdb_init();
-       err = rtnl_link_register(&br_link_ops);
+       err = rtnl_af_register(&br_af_ops);
        if (err)
                goto out;
 
+       err = rtnl_link_register(&br_link_ops);
+       if (err)
+               goto out_af;
+
        return 0;
+
+out_af:
+       rtnl_af_unregister(&br_af_ops);
 out:
        br_mdb_uninit();
        return err;
@@ -318,5 +501,6 @@ out:
 void __exit br_netlink_fini(void)
 {
        br_mdb_uninit();
+       rtnl_af_unregister(&br_af_ops);
        rtnl_link_unregister(&br_link_ops);
 }
index 711094a..6d314c4 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/netpoll.h>
 #include <linux/u64_stats_sync.h>
 #include <net/route.h>
+#include <linux/if_vlan.h>
 
 #define BR_HASH_BITS 8
 #define BR_HASH_SIZE (1 << BR_HASH_BITS)
@@ -26,6 +27,7 @@
 
 #define BR_PORT_BITS   10
 #define BR_MAX_PORTS   (1<<BR_PORT_BITS)
+#define BR_VLAN_BITMAP_LEN     BITS_TO_LONGS(VLAN_N_VID)
 
 #define BR_VERSION     "2.3"
 
@@ -61,6 +63,20 @@ struct br_ip
 #endif
        } u;
        __be16          proto;
+       __u16           vid;
+};
+
+struct net_port_vlans {
+       u16                             port_idx;
+       u16                             pvid;
+       union {
+               struct net_bridge_port          *port;
+               struct net_bridge               *br;
+       }                               parent;
+       struct rcu_head                 rcu;
+       unsigned long                   vlan_bitmap[BR_VLAN_BITMAP_LEN];
+       unsigned long                   untagged_bitmap[BR_VLAN_BITMAP_LEN];
+       u16                             num_vlans;
 };
 
 struct net_bridge_fdb_entry
@@ -74,6 +90,7 @@ struct net_bridge_fdb_entry
        mac_addr                        addr;
        unsigned char                   is_local;
        unsigned char                   is_static;
+       __u16                           vlan_id;
 };
 
 struct net_bridge_port_group {
@@ -156,6 +173,9 @@ struct net_bridge_port
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                  *np;
 #endif
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       struct net_port_vlans __rcu     *vlan_info;
+#endif
 };
 
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
@@ -197,9 +217,6 @@ struct net_bridge
        bool                            nf_call_ip6tables;
        bool                            nf_call_arptables;
 #endif
-       unsigned long                   flags;
-#define BR_SET_MAC_ADDR                0x00000001
-
        u16                             group_fwd_mask;
 
        /* STP */
@@ -260,6 +277,10 @@ struct net_bridge
        struct timer_list               topology_change_timer;
        struct timer_list               gc_timer;
        struct kobject                  *ifobj;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       u8                              vlan_enabled;
+       struct net_port_vlans __rcu     *vlan_info;
+#endif
 };
 
 struct br_input_skb_cb {
@@ -355,18 +376,22 @@ extern void br_fdb_cleanup(unsigned long arg);
 extern void br_fdb_delete_by_port(struct net_bridge *br,
                                  const struct net_bridge_port *p, int do_all);
 extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-                                                const unsigned char *addr);
+                                                const unsigned char *addr,
+                                                __u16 vid);
 extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
 extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
                          unsigned long count, unsigned long off);
 extern int br_fdb_insert(struct net_bridge *br,
                         struct net_bridge_port *source,
-                        const unsigned char *addr);
+                        const unsigned char *addr,
+                        u16 vid);
 extern void br_fdb_update(struct net_bridge *br,
                          struct net_bridge_port *source,
-                         const unsigned char *addr);
+                         const unsigned char *addr,
+                         u16 vid);
+extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
 
-extern int br_fdb_delete(struct ndmsg *ndm,
+extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                         struct net_device *dev,
                         const unsigned char *addr);
 extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
@@ -534,6 +559,142 @@ static inline void br_mdb_uninit(void)
 }
 #endif
 
+/* br_vlan.c */
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+                              struct sk_buff *skb, u16 *vid);
+extern bool br_allowed_egress(struct net_bridge *br,
+                             const struct net_port_vlans *v,
+                             const struct sk_buff *skb);
+extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                                     const struct net_port_vlans *v,
+                                     struct sk_buff *skb);
+extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
+extern int br_vlan_delete(struct net_bridge *br, u16 vid);
+extern void br_vlan_flush(struct net_bridge *br);
+extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
+extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
+extern void nbp_vlan_flush(struct net_bridge_port *port);
+extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
+
+static inline struct net_port_vlans *br_get_vlan_info(
+                                               const struct net_bridge *br)
+{
+       return rcu_dereference_rtnl(br->vlan_info);
+}
+
+static inline struct net_port_vlans *nbp_get_vlan_info(
+                                               const struct net_bridge_port *p)
+{
+       return rcu_dereference_rtnl(p->vlan_info);
+}
+
+/* Since the bridge now depends on the 8021Q module, by the time the bridge
+ * sees the skb, the vlan tag will always be present if the frame was tagged.
+ */
+static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
+{
+       int err = 0;
+
+       if (vlan_tx_tag_present(skb))
+               *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
+       else {
+               *vid = 0;
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static inline u16 br_get_pvid(const struct net_port_vlans *v)
+{
+       /* Return just the VID if it is set, or VLAN_N_VID (invalid vid) if
+        * vid wasn't set
+        */
+       smp_rmb();
+       return (v->pvid & VLAN_TAG_PRESENT) ?
+                       (v->pvid & ~VLAN_TAG_PRESENT) :
+                       VLAN_N_VID;
+}
+
+#else
+static inline bool br_allowed_ingress(struct net_bridge *br,
+                                     struct net_port_vlans *v,
+                                     struct sk_buff *skb,
+                                     u16 *vid)
+{
+       return true;
+}
+
+static inline bool br_allowed_egress(struct net_bridge *br,
+                                    const struct net_port_vlans *v,
+                                    const struct sk_buff *skb)
+{
+       return true;
+}
+
+static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                                            const struct net_port_vlans *v,
+                                            struct sk_buff *skb)
+{
+       return skb;
+}
+
+static inline int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int br_vlan_delete(struct net_bridge *br, u16 vid)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void br_vlan_flush(struct net_bridge *br)
+{
+}
+
+static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void nbp_vlan_flush(struct net_bridge_port *port)
+{
+}
+
+static inline struct net_port_vlans *br_get_vlan_info(
+                                               const struct net_bridge *br)
+{
+       return NULL;
+}
+static inline struct net_port_vlans *nbp_get_vlan_info(
+                                               const struct net_bridge_port *p)
+{
+       return NULL;
+}
+
+static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
+{
+       return false;
+}
+
+static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
+{
+       return 0;
+}
+static inline u16 br_get_pvid(const struct net_port_vlans *v)
+{
+       return VLAN_N_VID;      /* Returns invalid vid */
+}
+#endif
+
 /* br_netfilter.c */
 #ifdef CONFIG_BRIDGE_NETFILTER
 extern int br_netfilter_init(void);
@@ -594,8 +755,9 @@ extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
 extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
+extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
 extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                     struct net_device *dev);
+                     struct net_device *dev, u32 filter_mask);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
index 7f884e3..8660ea3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/etherdevice.h>
 #include <linux/llc.h>
 #include <linux/slab.h>
+#include <linux/pkt_sched.h>
 #include <net/net_namespace.h>
 #include <net/llc.h>
 #include <net/llc_pdu.h>
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
 
        skb->dev = p->dev;
        skb->protocol = htons(ETH_P_802_2);
+       skb->priority = TC_PRIO_CONTROL;
 
        skb_reserve(skb, LLC_RESERVE);
        memcpy(__skb_put(skb, length), data, length);
index 7b5197c..0bdb4eb 100644 (file)
@@ -216,7 +216,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
        struct net_bridge_port *p;
 
        /* user has chosen a value so keep it */
-       if (br->flags & BR_SET_MAC_ADDR)
+       if (br->dev->addr_assign_type == NET_ADDR_SET)
                return false;
 
        list_for_each_entry(p, &br->port_list, list) {
index 5913a3a..8baa9c0 100644 (file)
@@ -691,6 +691,24 @@ static ssize_t store_nf_call_arptables(
 static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
                   show_nf_call_arptables, store_nf_call_arptables);
 #endif
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+static ssize_t show_vlan_filtering(struct device *d,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       struct net_bridge *br = to_bridge(d);
+       return sprintf(buf, "%d\n", br->vlan_enabled);
+}
+
+static ssize_t store_vlan_filtering(struct device *d,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t len)
+{
+       return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
+}
+static DEVICE_ATTR(vlan_filtering, S_IRUGO | S_IWUSR,
+                  show_vlan_filtering, store_vlan_filtering);
+#endif
 
 static struct attribute *bridge_attrs[] = {
        &dev_attr_forward_delay.attr,
@@ -732,6 +750,9 @@ static struct attribute *bridge_attrs[] = {
        &dev_attr_nf_call_ip6tables.attr,
        &dev_attr_nf_call_arptables.attr,
 #endif
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       &dev_attr_vlan_filtering.attr,
+#endif
        NULL
 };
 
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
new file mode 100644 (file)
index 0000000..93dde75
--- /dev/null
@@ -0,0 +1,415 @@
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include "br_private.h"
+
+static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
+{
+       if (v->pvid == vid)
+               return;
+
+       smp_wmb();
+       v->pvid = vid;
+}
+
+static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
+{
+       if (v->pvid != vid)
+               return;
+
+       smp_wmb();
+       v->pvid = 0;
+}
+
+static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
+{
+       if (flags & BRIDGE_VLAN_INFO_PVID)
+               __vlan_add_pvid(v, vid);
+
+       if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
+               set_bit(vid, v->untagged_bitmap);
+}
+
+static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
+{
+       struct net_bridge_port *p = NULL;
+       struct net_bridge *br;
+       struct net_device *dev;
+       int err;
+
+       if (test_bit(vid, v->vlan_bitmap)) {
+               __vlan_add_flags(v, vid, flags);
+               return 0;
+       }
+
+       if (vid) {
+               if (v->port_idx) {
+                       p = v->parent.port;
+                       br = p->br;
+                       dev = p->dev;
+               } else {
+                       br = v->parent.br;
+                       dev = br->dev;
+               }
+
+               if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) {
+                       /* Add VLAN to the device filter if it is supported.
+                        * Strictly speaking, this is not necessary now, since
+                        * devices are made promiscuous by the bridge, but if
+                        * that ever changes this code will allow tagged
+                        * traffic to enter the bridge.
+                        */
+                       err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid);
+                       if (err)
+                               return err;
+               }
+
+               err = br_fdb_insert(br, p, dev->dev_addr, vid);
+               if (err) {
+                       br_err(br, "failed insert local address into bridge "
+                              "forwarding table\n");
+                       goto out_filt;
+               }
+
+       }
+
+       set_bit(vid, v->vlan_bitmap);
+       v->num_vlans++;
+       __vlan_add_flags(v, vid, flags);
+
+       return 0;
+
+out_filt:
+       if (p && (dev->features & NETIF_F_HW_VLAN_FILTER))
+               dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
+       return err;
+}
+
+static int __vlan_del(struct net_port_vlans *v, u16 vid)
+{
+       if (!test_bit(vid, v->vlan_bitmap))
+               return -EINVAL;
+
+       __vlan_delete_pvid(v, vid);
+       clear_bit(vid, v->untagged_bitmap);
+
+       if (v->port_idx && vid) {
+               struct net_device *dev = v->parent.port->dev;
+
+               if (dev->features & NETIF_F_HW_VLAN_FILTER)
+                       dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
+       }
+
+       clear_bit(vid, v->vlan_bitmap);
+       v->num_vlans--;
+       if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+               if (v->port_idx)
+                       rcu_assign_pointer(v->parent.port->vlan_info, NULL);
+               else
+                       rcu_assign_pointer(v->parent.br->vlan_info, NULL);
+               kfree_rcu(v, rcu);
+       }
+       return 0;
+}
+
+static void __vlan_flush(struct net_port_vlans *v)
+{
+       smp_wmb();
+       v->pvid = 0;
+       bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+       if (v->port_idx)
+               rcu_assign_pointer(v->parent.port->vlan_info, NULL);
+       else
+               rcu_assign_pointer(v->parent.br->vlan_info, NULL);
+       kfree_rcu(v, rcu);
+}
+
+/* Strip the tag from the packet.  Will return skb with tci set to 0.  */
+static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
+{
+       if (skb->protocol != htons(ETH_P_8021Q)) {
+               skb->vlan_tci = 0;
+               return skb;
+       }
+
+       skb->vlan_tci = 0;
+       skb = vlan_untag(skb);
+       if (skb)
+               skb->vlan_tci = 0;
+
+       return skb;
+}
+
+struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                              const struct net_port_vlans *pv,
+                              struct sk_buff *skb)
+{
+       u16 vid;
+
+       if (!br->vlan_enabled)
+               goto out;
+
+       /* At this point, we know that the frame was filtered and contains
+        * a valid vlan id.  If the vlan id is set in the untagged bitmap,
+        * send untagged; otherwise, send tagged.
+        */
+       br_vlan_get_tag(skb, &vid);
+       if (test_bit(vid, pv->untagged_bitmap))
+               skb = br_vlan_untag(skb);
+       else {
+               /* Egress policy says "send tagged".  If output device
+                * is the  bridge, we need to add the VLAN header
+                * ourselves since we'll be going through the RX path.
+                * Sending to ports puts the frame on the TX path and
+                * we let dev_hard_start_xmit() add the header.
+                */
+               if (skb->protocol != htons(ETH_P_8021Q) &&
+                   pv->port_idx == 0) {
+                       /* vlan_put_tag expects skb->data to point to
+                        * mac header.
+                        */
+                       skb_push(skb, ETH_HLEN);
+                       skb = __vlan_put_tag(skb, skb->vlan_tci);
+                       if (!skb)
+                               goto out;
+                       /* put skb->data back to where it was */
+                       skb_pull(skb, ETH_HLEN);
+                       skb->vlan_tci = 0;
+               }
+       }
+
+out:
+       return skb;
+}
+
+/* Called under RCU */
+bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+                       struct sk_buff *skb, u16 *vid)
+{
+       /* If VLAN filtering is disabled on the bridge, all packets are
+        * permitted.
+        */
+       if (!br->vlan_enabled)
+               return true;
+
+       /* If there are no vlan in the permitted list, all packets are
+        * rejected.
+        */
+       if (!v)
+               return false;
+
+       if (br_vlan_get_tag(skb, vid)) {
+               u16 pvid = br_get_pvid(v);
+
+               /* Frame did not have a tag.  See if pvid is set
+                * on this port.  That tells us which vlan untagged
+                * traffic belongs to.
+                */
+               if (pvid == VLAN_N_VID)
+                       return false;
+
+               /* PVID is set on this port.  Any untagged ingress
+                * frame is considered to belong to this vlan.
+                */
+               __vlan_hwaccel_put_tag(skb, pvid);
+               return true;
+       }
+
+       /* Frame had a valid vlan tag.  See if vlan is allowed */
+       if (test_bit(*vid, v->vlan_bitmap))
+               return true;
+
+       return false;
+}
+
+/* Called under RCU. */
+bool br_allowed_egress(struct net_bridge *br,
+                      const struct net_port_vlans *v,
+                      const struct sk_buff *skb)
+{
+       u16 vid;
+
+       if (!br->vlan_enabled)
+               return true;
+
+       if (!v)
+               return false;
+
+       br_vlan_get_tag(skb, &vid);
+       if (test_bit(vid, v->vlan_bitmap))
+               return true;
+
+       return false;
+}
+
+/* Must be protected by RTNL */
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
+{
+       struct net_port_vlans *pv = NULL;
+       int err;
+
+       ASSERT_RTNL();
+
+       pv = rtnl_dereference(br->vlan_info);
+       if (pv)
+               return __vlan_add(pv, vid, flags);
+
+       /* Create port vlan information
+        */
+       pv = kzalloc(sizeof(*pv), GFP_KERNEL);
+       if (!pv)
+               return -ENOMEM;
+
+       pv->parent.br = br;
+       err = __vlan_add(pv, vid, flags);
+       if (err)
+               goto out;
+
+       rcu_assign_pointer(br->vlan_info, pv);
+       return 0;
+out:
+       kfree(pv);
+       return err;
+}
+
+/* Must be protected by RTNL */
+int br_vlan_delete(struct net_bridge *br, u16 vid)
+{
+       struct net_port_vlans *pv;
+
+       ASSERT_RTNL();
+
+       pv = rtnl_dereference(br->vlan_info);
+       if (!pv)
+               return -EINVAL;
+
+       if (vid) {
+               /* If the VID !=0 remove fdb for this vid. VID 0 is special
+                * in that it's the default and is always there in the fdb.
+                */
+               spin_lock_bh(&br->hash_lock);
+               fdb_delete_by_addr(br, br->dev->dev_addr, vid);
+               spin_unlock_bh(&br->hash_lock);
+       }
+
+       __vlan_del(pv, vid);
+       return 0;
+}
+
+void br_vlan_flush(struct net_bridge *br)
+{
+       struct net_port_vlans *pv;
+
+       ASSERT_RTNL();
+       pv = rtnl_dereference(br->vlan_info);
+       if (!pv)
+               return;
+
+       __vlan_flush(pv);
+}
+
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+{
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (br->vlan_enabled == val)
+               goto unlock;
+
+       br->vlan_enabled = val;
+
+unlock:
+       rtnl_unlock();
+       return 0;
+}
+
+/* Must be protected by RTNL */
+int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
+{
+       struct net_port_vlans *pv = NULL;
+       int err;
+
+       ASSERT_RTNL();
+
+       pv = rtnl_dereference(port->vlan_info);
+       if (pv)
+               return __vlan_add(pv, vid, flags);
+
+       /* Create port vlan information
+        */
+       pv = kzalloc(sizeof(*pv), GFP_KERNEL);
+       if (!pv) {
+               err = -ENOMEM;
+               goto clean_up;
+       }
+
+       pv->port_idx = port->port_no;
+       pv->parent.port = port;
+       err = __vlan_add(pv, vid, flags);
+       if (err)
+               goto clean_up;
+
+       rcu_assign_pointer(port->vlan_info, pv);
+       return 0;
+
+clean_up:
+       kfree(pv);
+       return err;
+}
+
+/* Must be protected by RTNL */
+int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
+{
+       struct net_port_vlans *pv;
+
+       ASSERT_RTNL();
+
+       pv = rtnl_dereference(port->vlan_info);
+       if (!pv)
+               return -EINVAL;
+
+       if (vid) {
+               /* If the VID !=0 remove fdb for this vid. VID 0 is special
+                * in that it's the default and is always there in the fdb.
+                */
+               spin_lock_bh(&port->br->hash_lock);
+               fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
+               spin_unlock_bh(&port->br->hash_lock);
+       }
+
+       return __vlan_del(pv, vid);
+}
+
+void nbp_vlan_flush(struct net_bridge_port *port)
+{
+       struct net_port_vlans *pv;
+
+       ASSERT_RTNL();
+
+       pv = rtnl_dereference(port->vlan_info);
+       if (!pv)
+               return;
+
+       __vlan_flush(pv);
+}
+
+bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
+{
+       struct net_port_vlans *pv;
+       bool found = false;
+
+       rcu_read_lock();
+       pv = rcu_dereference(port->vlan_info);
+
+       if (!pv)
+               goto out;
+
+       if (test_bit(vid, pv->vlan_bitmap))
+               found = true;
+
+out:
+       rcu_read_unlock();
+       return found;
+}
index 28e12d1..5dcb200 100644 (file)
@@ -1633,7 +1633,7 @@ static void __exit bcm_module_exit(void)
        can_proto_unregister(&bcm_can_proto);
 
        if (proc_dir)
-               proc_net_remove(&init_net, "can-bcm");
+               remove_proc_entry("can-bcm", init_net.proc_net);
 }
 
 module_init(bcm_module_init);
index ae56690..4973358 100644 (file)
@@ -531,5 +531,5 @@ void can_remove_proc(void)
                can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF);
 
        if (can_dir)
-               proc_net_remove(&init_net, "can");
+               remove_proc_entry("can", init_net.proc_net);
 }
index 674641b..0c5e361 100644 (file)
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
                        neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
-                       sock_diag.o
+                       sock_diag.o dev_ioctl.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
index 0337e2b..368f9c3 100644 (file)
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                skb_queue_walk(queue, skb) {
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
-                               if (*off >= skb->len) {
+                               if (*off >= skb->len && skb->len) {
                                        *off -= skb->len;
                                        continue;
                                }
index a87bc74..decf55f 100644 (file)
 #include <net/xfrm.h>
 #include <linux/highmem.h>
 #include <linux/init.h>
-#include <linux/kmod.h>
 #include <linux/module.h>
 #include <linux/netpoll.h>
 #include <linux/rcupdate.h>
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
-#include <linux/net_tstamp.h>
 #include <linux/static_key.h>
 
 #include "net-sysfs.h"
@@ -1226,36 +1224,6 @@ void netdev_notify_peers(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_notify_peers);
 
-/**
- *     dev_load        - load a network module
- *     @net: the applicable net namespace
- *     @name: name of interface
- *
- *     If a network interface is not present and the process has suitable
- *     privileges this function loads the module. If module loading is not
- *     available in this kernel then it becomes a nop.
- */
-
-void dev_load(struct net *net, const char *name)
-{
-       struct net_device *dev;
-       int no_module;
-
-       rcu_read_lock();
-       dev = dev_get_by_name_rcu(net, name);
-       rcu_read_unlock();
-
-       no_module = !dev;
-       if (no_module && capable(CAP_NET_ADMIN))
-               no_module = request_module("netdev-%s", name);
-       if (no_module && capable(CAP_SYS_MODULE)) {
-               if (!request_module("%s", name))
-                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                               name);
-       }
-}
-EXPORT_SYMBOL(dev_load);
-
 static int __dev_open(struct net_device *dev)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -1266,6 +1234,14 @@ static int __dev_open(struct net_device *dev)
        if (!netif_device_present(dev))
                return -ENODEV;
 
+       /* Block netpoll from trying to do any rx path servicing.
+        * If we don't do this there is a chance ndo_poll_controller
+        * or ndo_poll may be running while we open the device
+        */
+       ret = netpoll_rx_disable(dev);
+       if (ret)
+               return ret;
+
        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
@@ -1279,6 +1255,8 @@ static int __dev_open(struct net_device *dev)
        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);
 
+       netpoll_rx_enable(dev);
+
        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
@@ -1370,9 +1348,16 @@ static int __dev_close(struct net_device *dev)
        int retval;
        LIST_HEAD(single);
 
+       /* Temporarily disable netpoll until the interface is down */
+       retval = netpoll_rx_disable(dev);
+       if (retval)
+               return retval;
+
        list_add(&dev->unreg_list, &single);
        retval = __dev_close_many(&single);
        list_del(&single);
+
+       netpoll_rx_enable(dev);
        return retval;
 }
 
@@ -1408,14 +1393,22 @@ static int dev_close_many(struct list_head *head)
  */
 int dev_close(struct net_device *dev)
 {
+       int ret = 0;
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);
 
+               /* Block netpoll rx while the interface is going down */
+               ret = netpoll_rx_disable(dev);
+               if (ret)
+                       return ret;
+
                list_add(&dev->unreg_list, &single);
                dev_close_many(&single);
                list_del(&single);
+
+               netpoll_rx_enable(dev);
        }
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(dev_close);
 
@@ -1620,57 +1613,6 @@ static inline void net_timestamp_set(struct sk_buff *skb)
                        __net_timestamp(SKB);           \
        }                                               \
 
-static int net_hwtstamp_validate(struct ifreq *ifr)
-{
-       struct hwtstamp_config cfg;
-       enum hwtstamp_tx_types tx_type;
-       enum hwtstamp_rx_filters rx_filter;
-       int tx_type_valid = 0;
-       int rx_filter_valid = 0;
-
-       if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
-               return -EFAULT;
-
-       if (cfg.flags) /* reserved for future extensions */
-               return -EINVAL;
-
-       tx_type = cfg.tx_type;
-       rx_filter = cfg.rx_filter;
-
-       switch (tx_type) {
-       case HWTSTAMP_TX_OFF:
-       case HWTSTAMP_TX_ON:
-       case HWTSTAMP_TX_ONESTEP_SYNC:
-               tx_type_valid = 1;
-               break;
-       }
-
-       switch (rx_filter) {
-       case HWTSTAMP_FILTER_NONE:
-       case HWTSTAMP_FILTER_ALL:
-       case HWTSTAMP_FILTER_SOME:
-       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-               rx_filter_valid = 1;
-               break;
-       }
-
-       if (!tx_type_valid || !rx_filter_valid)
-               return -ERANGE;
-
-       return 0;
-}
-
 static inline bool is_skb_forwardable(struct net_device *dev,
                                      struct sk_buff *skb)
 {
@@ -2303,25 +2245,19 @@ out:
 EXPORT_SYMBOL(skb_checksum_help);
 
 /**
- *     skb_gso_segment - Perform segmentation on skb.
+ *     skb_mac_gso_segment - mac layer segmentation handler.
  *     @skb: buffer to segment
  *     @features: features for the output path (see dev->features)
- *
- *     This function segments the given skb and returns a list of segments.
- *
- *     It may return NULL if the skb requires no segmentation.  This is
- *     only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb,
-       netdev_features_t features)
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+                                   netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
-       int vlan_depth = ETH_HLEN;
-       int err;
 
        while (type == htons(ETH_P_8021Q)) {
+               int vlan_depth = ETH_HLEN;
                struct vlan_hdr *vh;
 
                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -2332,22 +2268,14 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
                vlan_depth += VLAN_HLEN;
        }
 
-       skb_reset_mac_header(skb);
-       skb->mac_len = skb->network_header - skb->mac_header;
        __skb_pull(skb, skb->mac_len);
 
-       if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-               skb_warn_bad_offload(skb);
-
-               if (skb_header_cloned(skb) &&
-                   (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
-                       return ERR_PTR(err);
-       }
-
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
                if (ptype->type == type && ptype->callbacks.gso_segment) {
                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+                               int err;
+
                                err = ptype->callbacks.gso_send_check(skb);
                                segs = ERR_PTR(err);
                                if (err || skb_gso_ok(skb, features))
@@ -2365,7 +2293,50 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
 
        return segs;
 }
-EXPORT_SYMBOL(skb_gso_segment);
+EXPORT_SYMBOL(skb_mac_gso_segment);
+
+
+/* openvswitch calls this on rx path, so we need a different check.
+ */
+static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+{
+       if (tx_path)
+               return skb->ip_summed != CHECKSUM_PARTIAL;
+       else
+               return skb->ip_summed == CHECKSUM_NONE;
+}
+
+/**
+ *     __skb_gso_segment - Perform segmentation on skb.
+ *     @skb: buffer to segment
+ *     @features: features for the output path (see dev->features)
+ *     @tx_path: whether it is called in TX path
+ *
+ *     This function segments the given skb and returns a list of segments.
+ *
+ *     It may return NULL if the skb requires no segmentation.  This is
+ *     only possible when GSO is used for verifying header integrity.
+ */
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+                                 netdev_features_t features, bool tx_path)
+{
+       if (unlikely(skb_needs_check(skb, tx_path))) {
+               int err;
+
+               skb_warn_bad_offload(skb);
+
+               if (skb_header_cloned(skb) &&
+                   (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+                       return ERR_PTR(err);
+       }
+
+       SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+       skb_reset_mac_header(skb);
+       skb_reset_mac_len(skb);
+
+       return skb_mac_gso_segment(skb, features);
+}
+EXPORT_SYMBOL(__skb_gso_segment);
 
 /* Take action when hardware reception checksum errors are detected. */
 #ifdef CONFIG_BUG
@@ -2799,6 +2770,8 @@ int dev_queue_xmit(struct sk_buff *skb)
        struct Qdisc *q;
        int rc = -ENOMEM;
 
+       skb_reset_mac_header(skb);
+
        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
@@ -3419,7 +3392,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
        }
 }
 
-static int __netif_receive_skb(struct sk_buff *skb)
+static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
        struct packet_type *ptype, *pt_prev;
        rx_handler_func_t *rx_handler;
@@ -3428,24 +3401,11 @@ static int __netif_receive_skb(struct sk_buff *skb)
        bool deliver_exact = false;
        int ret = NET_RX_DROP;
        __be16 type;
-       unsigned long pflags = current->flags;
 
        net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
        trace_netif_receive_skb(skb);
 
-       /*
-        * PFMEMALLOC skbs are special, they should
-        * - be delivered to SOCK_MEMALLOC sockets only
-        * - stay away from userspace
-        * - have bounded memory usage
-        *
-        * Use PF_MEMALLOC as this saves us from propagating the allocation
-        * context down to all allocation sites.
-        */
-       if (sk_memalloc_socks() && skb_pfmemalloc(skb))
-               current->flags |= PF_MEMALLOC;
-
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
                goto out;
@@ -3479,7 +3439,7 @@ another_round:
        }
 #endif
 
-       if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+       if (pfmemalloc)
                goto skip_taps;
 
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -3498,8 +3458,7 @@ skip_taps:
 ncls:
 #endif
 
-       if (sk_memalloc_socks() && skb_pfmemalloc(skb)
-                               && !skb_pfmemalloc_protocol(skb))
+       if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
 
        if (vlan_tx_tag_present(skb)) {
@@ -3569,7 +3528,31 @@ drop:
 unlock:
        rcu_read_unlock();
 out:
-       tsk_restore_flags(current, pflags, PF_MEMALLOC);
+       return ret;
+}
+
+static int __netif_receive_skb(struct sk_buff *skb)
+{
+       int ret;
+
+       if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
+               unsigned long pflags = current->flags;
+
+               /*
+                * PFMEMALLOC skbs are special, they should
+                * - be delivered to SOCK_MEMALLOC sockets only
+                * - stay away from userspace
+                * - have bounded memory usage
+                *
+                * Use PF_MEMALLOC as this saves us from propagating the allocation
+                * context down to all allocation sites.
+                */
+               current->flags |= PF_MEMALLOC;
+               ret = __netif_receive_skb_core(skb, true);
+               tsk_restore_flags(current, pflags, PF_MEMALLOC);
+       } else
+               ret = __netif_receive_skb_core(skb, false);
+
        return ret;
 }
 
@@ -3736,7 +3719,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        __be16 type = skb->protocol;
        struct list_head *head = &offload_base;
        int same_flow;
-       int mac_len;
        enum gro_result ret;
 
        if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
@@ -3753,8 +3735,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                        continue;
 
                skb_set_network_header(skb, skb_gro_offset(skb));
-               mac_len = skb->network_header - skb->mac_header;
-               skb->mac_len = mac_len;
+               skb_reset_mac_len(skb);
                NAPI_GRO_CB(skb)->same_flow = 0;
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
@@ -4236,127 +4217,6 @@ softnet_break:
        goto out;
 }
 
-static gifconf_func_t *gifconf_list[NPROTO];
-
-/**
- *     register_gifconf        -       register a SIOCGIF handler
- *     @family: Address family
- *     @gifconf: Function handler
- *
- *     Register protocol dependent address dumping routines. The handler
- *     that is passed must not be freed or reused until it has been replaced
- *     by another handler.
- */
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
-{
-       if (family >= NPROTO)
-               return -EINVAL;
-       gifconf_list[family] = gifconf;
-       return 0;
-}
-EXPORT_SYMBOL(register_gifconf);
-
-
-/*
- *     Map an interface index to its name (SIOCGIFNAME)
- */
-
-/*
- *     We need this ioctl for efficient implementation of the
- *     if_indextoname() function required by the IPv6 API.  Without
- *     it, we would have to search all the interfaces to find a
- *     match.  --pb
- */
-
-static int dev_ifname(struct net *net, struct ifreq __user *arg)
-{
-       struct net_device *dev;
-       struct ifreq ifr;
-       unsigned seq;
-
-       /*
-        *      Fetch the caller's info block.
-        */
-
-       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-               return -EFAULT;
-
-retry:
-       seq = read_seqcount_begin(&devnet_rename_seq);
-       rcu_read_lock();
-       dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
-       if (!dev) {
-               rcu_read_unlock();
-               return -ENODEV;
-       }
-
-       strcpy(ifr.ifr_name, dev->name);
-       rcu_read_unlock();
-       if (read_seqcount_retry(&devnet_rename_seq, seq))
-               goto retry;
-
-       if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-               return -EFAULT;
-       return 0;
-}
-
-/*
- *     Perform a SIOCGIFCONF call. This structure will change
- *     size eventually, and there is nothing I can do about it.
- *     Thus we will need a 'compatibility mode'.
- */
-
-static int dev_ifconf(struct net *net, char __user *arg)
-{
-       struct ifconf ifc;
-       struct net_device *dev;
-       char __user *pos;
-       int len;
-       int total;
-       int i;
-
-       /*
-        *      Fetch the caller's info block.
-        */
-
-       if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
-               return -EFAULT;
-
-       pos = ifc.ifc_buf;
-       len = ifc.ifc_len;
-
-       /*
-        *      Loop over the interfaces, and write an info block for each.
-        */
-
-       total = 0;
-       for_each_netdev(net, dev) {
-               for (i = 0; i < NPROTO; i++) {
-                       if (gifconf_list[i]) {
-                               int done;
-                               if (!pos)
-                                       done = gifconf_list[i](dev, NULL, 0);
-                               else
-                                       done = gifconf_list[i](dev, pos + total,
-                                                              len - total);
-                               if (done < 0)
-                                       return -EFAULT;
-                               total += done;
-                       }
-               }
-       }
-
-       /*
-        *      All done.  Write the updated control block back to the caller.
-        */
-       ifc.ifc_len = total;
-
-       /*
-        *      Both BSD and Solaris return 0 here, so we do too.
-        */
-       return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
-}
-
 #ifdef CONFIG_PROC_FS
 
 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
@@ -4658,11 +4518,12 @@ static int __net_init dev_proc_net_init(struct net *net)
 {
        int rc = -ENOMEM;
 
-       if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
+       if (!proc_create("dev", S_IRUGO, net->proc_net, &dev_seq_fops))
                goto out;
-       if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
+       if (!proc_create("softnet_stat", S_IRUGO, net->proc_net,
+                        &softnet_seq_fops))
                goto out_dev;
-       if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
+       if (!proc_create("ptype", S_IRUGO, net->proc_net, &ptype_seq_fops))
                goto out_softnet;
 
        if (wext_proc_init(net))
@@ -4671,11 +4532,11 @@ static int __net_init dev_proc_net_init(struct net *net)
 out:
        return rc;
 out_ptype:
-       proc_net_remove(net, "ptype");
+       remove_proc_entry("ptype", net->proc_net);
 out_softnet:
-       proc_net_remove(net, "softnet_stat");
+       remove_proc_entry("softnet_stat", net->proc_net);
 out_dev:
-       proc_net_remove(net, "dev");
+       remove_proc_entry("dev", net->proc_net);
        goto out;
 }
 
@@ -4683,9 +4544,9 @@ static void __net_exit dev_proc_net_exit(struct net *net)
 {
        wext_proc_exit(net);
 
-       proc_net_remove(net, "ptype");
-       proc_net_remove(net, "softnet_stat");
-       proc_net_remove(net, "dev");
+       remove_proc_entry("ptype", net->proc_net);
+       remove_proc_entry("softnet_stat", net->proc_net);
+       remove_proc_entry("dev", net->proc_net);
 }
 
 static struct pernet_operations __net_initdata dev_proc_ops = {
@@ -5317,375 +5178,6 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
 }
 EXPORT_SYMBOL(dev_change_carrier);
 
-/*
- *     Perform the SIOCxIFxxx calls, inside rcu_read_lock()
- */
-static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
-{
-       int err;
-       struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
-
-       if (!dev)
-               return -ENODEV;
-
-       switch (cmd) {
-       case SIOCGIFFLAGS:      /* Get interface flags */
-               ifr->ifr_flags = (short) dev_get_flags(dev);
-               return 0;
-
-       case SIOCGIFMETRIC:     /* Get the metric on the interface
-                                  (currently unused) */
-               ifr->ifr_metric = 0;
-               return 0;
-
-       case SIOCGIFMTU:        /* Get the MTU of a device */
-               ifr->ifr_mtu = dev->mtu;
-               return 0;
-
-       case SIOCGIFHWADDR:
-               if (!dev->addr_len)
-                       memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
-               else
-                       memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
-                              min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-               ifr->ifr_hwaddr.sa_family = dev->type;
-               return 0;
-
-       case SIOCGIFSLAVE:
-               err = -EINVAL;
-               break;
-
-       case SIOCGIFMAP:
-               ifr->ifr_map.mem_start = dev->mem_start;
-               ifr->ifr_map.mem_end   = dev->mem_end;
-               ifr->ifr_map.base_addr = dev->base_addr;
-               ifr->ifr_map.irq       = dev->irq;
-               ifr->ifr_map.dma       = dev->dma;
-               ifr->ifr_map.port      = dev->if_port;
-               return 0;
-
-       case SIOCGIFINDEX:
-               ifr->ifr_ifindex = dev->ifindex;
-               return 0;
-
-       case SIOCGIFTXQLEN:
-               ifr->ifr_qlen = dev->tx_queue_len;
-               return 0;
-
-       default:
-               /* dev_ioctl() should ensure this case
-                * is never reached
-                */
-               WARN_ON(1);
-               err = -ENOTTY;
-               break;
-
-       }
-       return err;
-}
-
-/*
- *     Perform the SIOCxIFxxx calls, inside rtnl_lock()
- */
-static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
-{
-       int err;
-       struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
-       const struct net_device_ops *ops;
-
-       if (!dev)
-               return -ENODEV;
-
-       ops = dev->netdev_ops;
-
-       switch (cmd) {
-       case SIOCSIFFLAGS:      /* Set interface flags */
-               return dev_change_flags(dev, ifr->ifr_flags);
-
-       case SIOCSIFMETRIC:     /* Set the metric on the interface
-                                  (currently unused) */
-               return -EOPNOTSUPP;
-
-       case SIOCSIFMTU:        /* Set the MTU of a device */
-               return dev_set_mtu(dev, ifr->ifr_mtu);
-
-       case SIOCSIFHWADDR:
-               return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
-
-       case SIOCSIFHWBROADCAST:
-               if (ifr->ifr_hwaddr.sa_family != dev->type)
-                       return -EINVAL;
-               memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
-                      min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-               return 0;
-
-       case SIOCSIFMAP:
-               if (ops->ndo_set_config) {
-                       if (!netif_device_present(dev))
-                               return -ENODEV;
-                       return ops->ndo_set_config(dev, &ifr->ifr_map);
-               }
-               return -EOPNOTSUPP;
-
-       case SIOCADDMULTI:
-               if (!ops->ndo_set_rx_mode ||
-                   ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-                       return -EINVAL;
-               if (!netif_device_present(dev))
-                       return -ENODEV;
-               return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
-
-       case SIOCDELMULTI:
-               if (!ops->ndo_set_rx_mode ||
-                   ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-                       return -EINVAL;
-               if (!netif_device_present(dev))
-                       return -ENODEV;
-               return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
-
-       case SIOCSIFTXQLEN:
-               if (ifr->ifr_qlen < 0)
-                       return -EINVAL;
-               dev->tx_queue_len = ifr->ifr_qlen;
-               return 0;
-
-       case SIOCSIFNAME:
-               ifr->ifr_newname[IFNAMSIZ-1] = '\0';
-               return dev_change_name(dev, ifr->ifr_newname);
-
-       case SIOCSHWTSTAMP:
-               err = net_hwtstamp_validate(ifr);
-               if (err)
-                       return err;
-               /* fall through */
-
-       /*
-        *      Unknown or private ioctl
-        */
-       default:
-               if ((cmd >= SIOCDEVPRIVATE &&
-                   cmd <= SIOCDEVPRIVATE + 15) ||
-                   cmd == SIOCBONDENSLAVE ||
-                   cmd == SIOCBONDRELEASE ||
-                   cmd == SIOCBONDSETHWADDR ||
-                   cmd == SIOCBONDSLAVEINFOQUERY ||
-                   cmd == SIOCBONDINFOQUERY ||
-                   cmd == SIOCBONDCHANGEACTIVE ||
-                   cmd == SIOCGMIIPHY ||
-                   cmd == SIOCGMIIREG ||
-                   cmd == SIOCSMIIREG ||
-                   cmd == SIOCBRADDIF ||
-                   cmd == SIOCBRDELIF ||
-                   cmd == SIOCSHWTSTAMP ||
-                   cmd == SIOCWANDEV) {
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_do_ioctl) {
-                               if (netif_device_present(dev))
-                                       err = ops->ndo_do_ioctl(dev, ifr, cmd);
-                               else
-                                       err = -ENODEV;
-                       }
-               } else
-                       err = -EINVAL;
-
-       }
-       return err;
-}
-
-/*
- *     This function handles all "interface"-type I/O control requests. The actual
- *     'doing' part of this is dev_ifsioc above.
- */
-
-/**
- *     dev_ioctl       -       network device ioctl
- *     @net: the applicable net namespace
- *     @cmd: command to issue
- *     @arg: pointer to a struct ifreq in user space
- *
- *     Issue ioctl functions to devices. This is normally called by the
- *     user space syscall interfaces but can sometimes be useful for
- *     other purposes. The return value is the return from the syscall if
- *     positive or a negative errno code on error.
- */
-
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
-{
-       struct ifreq ifr;
-       int ret;
-       char *colon;
-
-       /* One special case: SIOCGIFCONF takes ifconf argument
-          and requires shared lock, because it sleeps writing
-          to user space.
-        */
-
-       if (cmd == SIOCGIFCONF) {
-               rtnl_lock();
-               ret = dev_ifconf(net, (char __user *) arg);
-               rtnl_unlock();
-               return ret;
-       }
-       if (cmd == SIOCGIFNAME)
-               return dev_ifname(net, (struct ifreq __user *)arg);
-
-       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-               return -EFAULT;
-
-       ifr.ifr_name[IFNAMSIZ-1] = 0;
-
-       colon = strchr(ifr.ifr_name, ':');
-       if (colon)
-               *colon = 0;
-
-       /*
-        *      See which interface the caller is talking about.
-        */
-
-       switch (cmd) {
-       /*
-        *      These ioctl calls:
-        *      - can be done by all.
-        *      - atomic and do not require locking.
-        *      - return a value
-        */
-       case SIOCGIFFLAGS:
-       case SIOCGIFMETRIC:
-       case SIOCGIFMTU:
-       case SIOCGIFHWADDR:
-       case SIOCGIFSLAVE:
-       case SIOCGIFMAP:
-       case SIOCGIFINDEX:
-       case SIOCGIFTXQLEN:
-               dev_load(net, ifr.ifr_name);
-               rcu_read_lock();
-               ret = dev_ifsioc_locked(net, &ifr, cmd);
-               rcu_read_unlock();
-               if (!ret) {
-                       if (colon)
-                               *colon = ':';
-                       if (copy_to_user(arg, &ifr,
-                                        sizeof(struct ifreq)))
-                               ret = -EFAULT;
-               }
-               return ret;
-
-       case SIOCETHTOOL:
-               dev_load(net, ifr.ifr_name);
-               rtnl_lock();
-               ret = dev_ethtool(net, &ifr);
-               rtnl_unlock();
-               if (!ret) {
-                       if (colon)
-                               *colon = ':';
-                       if (copy_to_user(arg, &ifr,
-                                        sizeof(struct ifreq)))
-                               ret = -EFAULT;
-               }
-               return ret;
-
-       /*
-        *      These ioctl calls:
-        *      - require superuser power.
-        *      - require strict serialization.
-        *      - return a value
-        */
-       case SIOCGMIIPHY:
-       case SIOCGMIIREG:
-       case SIOCSIFNAME:
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-                       return -EPERM;
-               dev_load(net, ifr.ifr_name);
-               rtnl_lock();
-               ret = dev_ifsioc(net, &ifr, cmd);
-               rtnl_unlock();
-               if (!ret) {
-                       if (colon)
-                               *colon = ':';
-                       if (copy_to_user(arg, &ifr,
-                                        sizeof(struct ifreq)))
-                               ret = -EFAULT;
-               }
-               return ret;
-
-       /*
-        *      These ioctl calls:
-        *      - require superuser power.
-        *      - require strict serialization.
-        *      - do not return a value
-        */
-       case SIOCSIFMAP:
-       case SIOCSIFTXQLEN:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               /* fall through */
-       /*
-        *      These ioctl calls:
-        *      - require local superuser power.
-        *      - require strict serialization.
-        *      - do not return a value
-        */
-       case SIOCSIFFLAGS:
-       case SIOCSIFMETRIC:
-       case SIOCSIFMTU:
-       case SIOCSIFHWADDR:
-       case SIOCSIFSLAVE:
-       case SIOCADDMULTI:
-       case SIOCDELMULTI:
-       case SIOCSIFHWBROADCAST:
-       case SIOCSMIIREG:
-       case SIOCBONDENSLAVE:
-       case SIOCBONDRELEASE:
-       case SIOCBONDSETHWADDR:
-       case SIOCBONDCHANGEACTIVE:
-       case SIOCBRADDIF:
-       case SIOCBRDELIF:
-       case SIOCSHWTSTAMP:
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-                       return -EPERM;
-               /* fall through */
-       case SIOCBONDSLAVEINFOQUERY:
-       case SIOCBONDINFOQUERY:
-               dev_load(net, ifr.ifr_name);
-               rtnl_lock();
-               ret = dev_ifsioc(net, &ifr, cmd);
-               rtnl_unlock();
-               return ret;
-
-       case SIOCGIFMEM:
-               /* Get the per device memory space. We can add this but
-                * currently do not support it */
-       case SIOCSIFMEM:
-               /* Set the per device memory buffer space.
-                * Not applicable in our case */
-       case SIOCSIFLINK:
-               return -ENOTTY;
-
-       /*
-        *      Unknown or private ioctl.
-        */
-       default:
-               if (cmd == SIOCWANDEV ||
-                   (cmd >= SIOCDEVPRIVATE &&
-                    cmd <= SIOCDEVPRIVATE + 15)) {
-                       dev_load(net, ifr.ifr_name);
-                       rtnl_lock();
-                       ret = dev_ifsioc(net, &ifr, cmd);
-                       rtnl_unlock();
-                       if (!ret && copy_to_user(arg, &ifr,
-                                                sizeof(struct ifreq)))
-                               ret = -EFAULT;
-                       return ret;
-               }
-               /* Take care of Wireless Extensions */
-               if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-                       return wext_handle_ioctl(net, &ifr, cmd, arg);
-               return -ENOTTY;
-       }
-}
-
-
 /**
  *     dev_new_index   -       allocate an ifindex
  *     @net: the applicable net namespace
@@ -5958,10 +5450,9 @@ static int netif_alloc_rx_queues(struct net_device *dev)
        BUG_ON(count < 1);
 
        rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
-       if (!rx) {
-               pr_err("netdev: Unable to allocate %u rx queues\n", count);
+       if (!rx)
                return -ENOMEM;
-       }
+
        dev->_rx = rx;
 
        for (i = 0; i < count; i++)
@@ -5992,10 +5483,9 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
        BUG_ON(count < 1);
 
        tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
-       if (!tx) {
-               pr_err("netdev: Unable to allocate %u tx queues\n", count);
+       if (!tx)
                return -ENOMEM;
-       }
+
        dev->_tx = tx;
 
        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
@@ -6482,10 +5972,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        alloc_size += NETDEV_ALIGN - 1;
 
        p = kzalloc(alloc_size, GFP_KERNEL);
-       if (!p) {
-               pr_err("alloc_netdev: Unable to allocate device\n");
+       if (!p)
                return NULL;
-       }
 
        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;
index b079c7b..8956252 100644 (file)
@@ -780,14 +780,14 @@ static const struct file_operations dev_mc_seq_fops = {
 
 static int __net_init dev_mc_net_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
+       if (!proc_create("dev_mcast", 0, net->proc_net, &dev_mc_seq_fops))
                return -ENOMEM;
        return 0;
 }
 
 static void __net_exit dev_mc_net_exit(struct net *net)
 {
-       proc_net_remove(net, "dev_mcast");
+       remove_proc_entry("dev_mcast", net->proc_net);
 }
 
 static struct pernet_operations __net_initdata dev_mc_net_ops = {
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
new file mode 100644 (file)
index 0000000..6cc0481
--- /dev/null
@@ -0,0 +1,576 @@
+#include <linux/kmod.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/net_tstamp.h>
+#include <linux/wireless.h>
+#include <net/wext.h>
+
+/*
+ *     Map an interface index to its name (SIOCGIFNAME)
+ */
+
+/*
+ *     We need this ioctl for efficient implementation of the
+ *     if_indextoname() function required by the IPv6 API.  Without
+ *     it, we would have to search all the interfaces to find a
+ *     match.  --pb
+ */
+
+static int dev_ifname(struct net *net, struct ifreq __user *arg)
+{
+       struct net_device *dev;
+       struct ifreq ifr;
+       unsigned seq;
+
+       /*
+        *      Fetch the caller's info block.
+        */
+
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
+
+retry:
+       seq = read_seqcount_begin(&devnet_rename_seq);
+       rcu_read_lock();
+       dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
+       if (!dev) {
+               rcu_read_unlock();
+               return -ENODEV;
+       }
+
+       strcpy(ifr.ifr_name, dev->name);
+       rcu_read_unlock();
+       if (read_seqcount_retry(&devnet_rename_seq, seq))
+               goto retry;
+
+       if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+               return -EFAULT;
+       return 0;
+}
+
+static gifconf_func_t *gifconf_list[NPROTO];
+
+/**
+ *     register_gifconf        -       register a SIOCGIF handler
+ *     @family: Address family
+ *     @gifconf: Function handler
+ *
+ *     Register protocol dependent address dumping routines. The handler
+ *     that is passed must not be freed or reused until it has been replaced
+ *     by another handler.
+ */
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
+{
+       if (family >= NPROTO)
+               return -EINVAL;
+       gifconf_list[family] = gifconf;
+       return 0;
+}
+EXPORT_SYMBOL(register_gifconf);
+
+/*
+ *     Perform a SIOCGIFCONF call. This structure will change
+ *     size eventually, and there is nothing I can do about it.
+ *     Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(struct net *net, char __user *arg)
+{
+       struct ifconf ifc;
+       struct net_device *dev;
+       char __user *pos;
+       int len;
+       int total;
+       int i;
+
+       /*
+        *      Fetch the caller's info block.
+        */
+
+       if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
+               return -EFAULT;
+
+       pos = ifc.ifc_buf;
+       len = ifc.ifc_len;
+
+       /*
+        *      Loop over the interfaces, and write an info block for each.
+        */
+
+       total = 0;
+       for_each_netdev(net, dev) {
+               for (i = 0; i < NPROTO; i++) {
+                       if (gifconf_list[i]) {
+                               int done;
+                               if (!pos)
+                                       done = gifconf_list[i](dev, NULL, 0);
+                               else
+                                       done = gifconf_list[i](dev, pos + total,
+                                                              len - total);
+                               if (done < 0)
+                                       return -EFAULT;
+                               total += done;
+                       }
+               }
+       }
+
+       /*
+        *      All done.  Write the updated control block back to the caller.
+        */
+       ifc.ifc_len = total;
+
+       /*
+        *      Both BSD and Solaris return 0 here, so we do too.
+        */
+       return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
+}
+
+/*
+ *     Perform the SIOCxIFxxx calls, inside rcu_read_lock()
+ */
+static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
+{
+       int err;
+       struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
+
+       if (!dev)
+               return -ENODEV;
+
+       switch (cmd) {
+       case SIOCGIFFLAGS:      /* Get interface flags */
+               ifr->ifr_flags = (short) dev_get_flags(dev);
+               return 0;
+
+       case SIOCGIFMETRIC:     /* Get the metric on the interface
+                                  (currently unused) */
+               ifr->ifr_metric = 0;
+               return 0;
+
+       case SIOCGIFMTU:        /* Get the MTU of a device */
+               ifr->ifr_mtu = dev->mtu;
+               return 0;
+
+       case SIOCGIFHWADDR:
+               if (!dev->addr_len)
+                       memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+               else
+                       memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+                              min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+               ifr->ifr_hwaddr.sa_family = dev->type;
+               return 0;
+
+       case SIOCGIFSLAVE:
+               err = -EINVAL;
+               break;
+
+       case SIOCGIFMAP:
+               ifr->ifr_map.mem_start = dev->mem_start;
+               ifr->ifr_map.mem_end   = dev->mem_end;
+               ifr->ifr_map.base_addr = dev->base_addr;
+               ifr->ifr_map.irq       = dev->irq;
+               ifr->ifr_map.dma       = dev->dma;
+               ifr->ifr_map.port      = dev->if_port;
+               return 0;
+
+       case SIOCGIFINDEX:
+               ifr->ifr_ifindex = dev->ifindex;
+               return 0;
+
+       case SIOCGIFTXQLEN:
+               ifr->ifr_qlen = dev->tx_queue_len;
+               return 0;
+
+       default:
+               /* dev_ioctl() should ensure this case
+                * is never reached
+                */
+               WARN_ON(1);
+               err = -ENOTTY;
+               break;
+
+       }
+       return err;
+}
+
+static int net_hwtstamp_validate(struct ifreq *ifr)
+{
+       struct hwtstamp_config cfg;
+       enum hwtstamp_tx_types tx_type;
+       enum hwtstamp_rx_filters rx_filter;
+       int tx_type_valid = 0;
+       int rx_filter_valid = 0;
+
+       if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+               return -EFAULT;
+
+       if (cfg.flags) /* reserved for future extensions */
+               return -EINVAL;
+
+       tx_type = cfg.tx_type;
+       rx_filter = cfg.rx_filter;
+
+       switch (tx_type) {
+       case HWTSTAMP_TX_OFF:
+       case HWTSTAMP_TX_ON:
+       case HWTSTAMP_TX_ONESTEP_SYNC:
+               tx_type_valid = 1;
+               break;
+       }
+
+       switch (rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               rx_filter_valid = 1;
+               break;
+       }
+
+       if (!tx_type_valid || !rx_filter_valid)
+               return -ERANGE;
+
+       return 0;
+}
+
+/*
+ *     Perform the SIOCxIFxxx calls, inside rtnl_lock()
+ */
+static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
+{
+       int err;
+       struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+       const struct net_device_ops *ops;
+
+       if (!dev)
+               return -ENODEV;
+
+       ops = dev->netdev_ops;
+
+       switch (cmd) {
+       case SIOCSIFFLAGS:      /* Set interface flags */
+               return dev_change_flags(dev, ifr->ifr_flags);
+
+       case SIOCSIFMETRIC:     /* Set the metric on the interface
+                                  (currently unused) */
+               return -EOPNOTSUPP;
+
+       case SIOCSIFMTU:        /* Set the MTU of a device */
+               return dev_set_mtu(dev, ifr->ifr_mtu);
+
+       case SIOCSIFHWADDR:
+               return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
+
+       case SIOCSIFHWBROADCAST:
+               if (ifr->ifr_hwaddr.sa_family != dev->type)
+                       return -EINVAL;
+               memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+                      min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+               return 0;
+
+       case SIOCSIFMAP:
+               if (ops->ndo_set_config) {
+                       if (!netif_device_present(dev))
+                               return -ENODEV;
+                       return ops->ndo_set_config(dev, &ifr->ifr_map);
+               }
+               return -EOPNOTSUPP;
+
+       case SIOCADDMULTI:
+               if (!ops->ndo_set_rx_mode ||
+                   ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+                       return -EINVAL;
+               if (!netif_device_present(dev))
+                       return -ENODEV;
+               return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
+
+       case SIOCDELMULTI:
+               if (!ops->ndo_set_rx_mode ||
+                   ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+                       return -EINVAL;
+               if (!netif_device_present(dev))
+                       return -ENODEV;
+               return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
+
+       case SIOCSIFTXQLEN:
+               if (ifr->ifr_qlen < 0)
+                       return -EINVAL;
+               dev->tx_queue_len = ifr->ifr_qlen;
+               return 0;
+
+       case SIOCSIFNAME:
+               ifr->ifr_newname[IFNAMSIZ-1] = '\0';
+               return dev_change_name(dev, ifr->ifr_newname);
+
+       case SIOCSHWTSTAMP:
+               err = net_hwtstamp_validate(ifr);
+               if (err)
+                       return err;
+               /* fall through */
+
+       /*
+        *      Unknown or private ioctl
+        */
+       default:
+               if ((cmd >= SIOCDEVPRIVATE &&
+                   cmd <= SIOCDEVPRIVATE + 15) ||
+                   cmd == SIOCBONDENSLAVE ||
+                   cmd == SIOCBONDRELEASE ||
+                   cmd == SIOCBONDSETHWADDR ||
+                   cmd == SIOCBONDSLAVEINFOQUERY ||
+                   cmd == SIOCBONDINFOQUERY ||
+                   cmd == SIOCBONDCHANGEACTIVE ||
+                   cmd == SIOCGMIIPHY ||
+                   cmd == SIOCGMIIREG ||
+                   cmd == SIOCSMIIREG ||
+                   cmd == SIOCBRADDIF ||
+                   cmd == SIOCBRDELIF ||
+                   cmd == SIOCSHWTSTAMP ||
+                   cmd == SIOCWANDEV) {
+                       err = -EOPNOTSUPP;
+                       if (ops->ndo_do_ioctl) {
+                               if (netif_device_present(dev))
+                                       err = ops->ndo_do_ioctl(dev, ifr, cmd);
+                               else
+                                       err = -ENODEV;
+                       }
+               } else
+                       err = -EINVAL;
+
+       }
+       return err;
+}
+
+/**
+ *     dev_load        - load a network module
+ *     @net: the applicable net namespace
+ *     @name: name of interface
+ *
+ *     If a network interface is not present and the process has suitable
+ *     privileges this function loads the module. If module loading is not
+ *     available in this kernel then it becomes a nop.
+ */
+
+void dev_load(struct net *net, const char *name)
+{
+       struct net_device *dev;
+       int no_module;
+
+       rcu_read_lock();
+       dev = dev_get_by_name_rcu(net, name);
+       rcu_read_unlock();
+
+       no_module = !dev;
+       if (no_module && capable(CAP_NET_ADMIN))
+               no_module = request_module("netdev-%s", name);
+       if (no_module && capable(CAP_SYS_MODULE)) {
+               if (!request_module("%s", name))
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
+       }
+}
+EXPORT_SYMBOL(dev_load);
+
+/*
+ *     This function handles all "interface"-type I/O control requests. The actual
+ *     'doing' part of this is dev_ifsioc above.
+ */
+
+/**
+ *     dev_ioctl       -       network device ioctl
+ *     @net: the applicable net namespace
+ *     @cmd: command to issue
+ *     @arg: pointer to a struct ifreq in user space
+ *
+ *     Issue ioctl functions to devices. This is normally called by the
+ *     user space syscall interfaces but can sometimes be useful for
+ *     other purposes. The return value is the return from the syscall if
+ *     positive or a negative errno code on error.
+ */
+
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+{
+       struct ifreq ifr;
+       int ret;
+       char *colon;
+
+       /* One special case: SIOCGIFCONF takes ifconf argument
+          and requires shared lock, because it sleeps writing
+          to user space.
+        */
+
+       if (cmd == SIOCGIFCONF) {
+               rtnl_lock();
+               ret = dev_ifconf(net, (char __user *) arg);
+               rtnl_unlock();
+               return ret;
+       }
+       if (cmd == SIOCGIFNAME)
+               return dev_ifname(net, (struct ifreq __user *)arg);
+
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
+
+       ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+       colon = strchr(ifr.ifr_name, ':');
+       if (colon)
+               *colon = 0;
+
+       /*
+        *      See which interface the caller is talking about.
+        */
+
+       switch (cmd) {
+       /*
+        *      These ioctl calls:
+        *      - can be done by all.
+        *      - atomic and do not require locking.
+        *      - return a value
+        */
+       case SIOCGIFFLAGS:
+       case SIOCGIFMETRIC:
+       case SIOCGIFMTU:
+       case SIOCGIFHWADDR:
+       case SIOCGIFSLAVE:
+       case SIOCGIFMAP:
+       case SIOCGIFINDEX:
+       case SIOCGIFTXQLEN:
+               dev_load(net, ifr.ifr_name);
+               rcu_read_lock();
+               ret = dev_ifsioc_locked(net, &ifr, cmd);
+               rcu_read_unlock();
+               if (!ret) {
+                       if (colon)
+                               *colon = ':';
+                       if (copy_to_user(arg, &ifr,
+                                        sizeof(struct ifreq)))
+                               ret = -EFAULT;
+               }
+               return ret;
+
+       case SIOCETHTOOL:
+               dev_load(net, ifr.ifr_name);
+               rtnl_lock();
+               ret = dev_ethtool(net, &ifr);
+               rtnl_unlock();
+               if (!ret) {
+                       if (colon)
+                               *colon = ':';
+                       if (copy_to_user(arg, &ifr,
+                                        sizeof(struct ifreq)))
+                               ret = -EFAULT;
+               }
+               return ret;
+
+       /*
+        *      These ioctl calls:
+        *      - require superuser power.
+        *      - require strict serialization.
+        *      - return a value
+        */
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSIFNAME:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       return -EPERM;
+               dev_load(net, ifr.ifr_name);
+               rtnl_lock();
+               ret = dev_ifsioc(net, &ifr, cmd);
+               rtnl_unlock();
+               if (!ret) {
+                       if (colon)
+                               *colon = ':';
+                       if (copy_to_user(arg, &ifr,
+                                        sizeof(struct ifreq)))
+                               ret = -EFAULT;
+               }
+               return ret;
+
+       /*
+        *      These ioctl calls:
+        *      - require superuser power.
+        *      - require strict serialization.
+        *      - do not return a value
+        */
+       case SIOCSIFMAP:
+       case SIOCSIFTXQLEN:
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+               /* fall through */
+       /*
+        *      These ioctl calls:
+        *      - require local superuser power.
+        *      - require strict serialization.
+        *      - do not return a value
+        */
+       case SIOCSIFFLAGS:
+       case SIOCSIFMETRIC:
+       case SIOCSIFMTU:
+       case SIOCSIFHWADDR:
+       case SIOCSIFSLAVE:
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+       case SIOCSIFHWBROADCAST:
+       case SIOCSMIIREG:
+       case SIOCBONDENSLAVE:
+       case SIOCBONDRELEASE:
+       case SIOCBONDSETHWADDR:
+       case SIOCBONDCHANGEACTIVE:
+       case SIOCBRADDIF:
+       case SIOCBRDELIF:
+       case SIOCSHWTSTAMP:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       return -EPERM;
+               /* fall through */
+       case SIOCBONDSLAVEINFOQUERY:
+       case SIOCBONDINFOQUERY:
+               dev_load(net, ifr.ifr_name);
+               rtnl_lock();
+               ret = dev_ifsioc(net, &ifr, cmd);
+               rtnl_unlock();
+               return ret;
+
+       case SIOCGIFMEM:
+               /* Get the per device memory space. We can add this but
+                * currently do not support it */
+       case SIOCSIFMEM:
+               /* Set the per device memory buffer space.
+                * Not applicable in our case */
+       case SIOCSIFLINK:
+               return -ENOTTY;
+
+       /*
+        *      Unknown or private ioctl.
+        */
+       default:
+               if (cmd == SIOCWANDEV ||
+                   (cmd >= SIOCDEVPRIVATE &&
+                    cmd <= SIOCDEVPRIVATE + 15)) {
+                       dev_load(net, ifr.ifr_name);
+                       rtnl_lock();
+                       ret = dev_ifsioc(net, &ifr, cmd);
+                       rtnl_unlock();
+                       if (!ret && copy_to_user(arg, &ifr,
+                                                sizeof(struct ifreq)))
+                               ret = -EFAULT;
+                       return ret;
+               }
+               /* Take care of Wireless Extensions */
+               if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
+                       return wext_handle_ioctl(net, &ifr, cmd, arg);
+               return -ENOTTY;
+       }
+}
index d9d5520..3e9b2c3 100644 (file)
@@ -77,6 +77,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
        [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
        [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
+       [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
 
        [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
        [NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",
index 331ccb9..fa32899 100644 (file)
@@ -47,6 +47,8 @@ static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+static struct srcu_struct netpoll_srcu;
+
 #define USEC_PER_POLL  50
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
@@ -59,6 +61,7 @@ static atomic_t trapped;
 
 static void zap_completion_queue(void);
 static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
+static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -199,18 +202,31 @@ static void netpoll_poll_dev(struct net_device *dev)
        const struct net_device_ops *ops;
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
-       if (!dev || !netif_running(dev))
+       /* Don't do any rx activity if the dev_lock mutex is held
+        * the dev_open/close paths use this to block netpoll activity
+        * while changing device state
+        */
+       if (!mutex_trylock(&ni->dev_lock))
                return;
 
+       if (!netif_running(dev)) {
+               mutex_unlock(&ni->dev_lock);
+               return;
+       }
+
        ops = dev->netdev_ops;
-       if (!ops->ndo_poll_controller)
+       if (!ops->ndo_poll_controller) {
+               mutex_unlock(&ni->dev_lock);
                return;
+       }
 
        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);
 
        poll_napi(dev);
 
+       mutex_unlock(&ni->dev_lock);
+
        if (dev->flags & IFF_SLAVE) {
                if (ni) {
                        struct net_device *bond_dev;
@@ -231,6 +247,31 @@ static void netpoll_poll_dev(struct net_device *dev)
        zap_completion_queue();
 }
 
+int netpoll_rx_disable(struct net_device *dev)
+{
+       struct netpoll_info *ni;
+       int idx;
+       might_sleep();
+       idx = srcu_read_lock(&netpoll_srcu);
+       ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
+       if (ni)
+               mutex_lock(&ni->dev_lock);
+       srcu_read_unlock(&netpoll_srcu, idx);
+       return 0;
+}
+EXPORT_SYMBOL(netpoll_rx_disable);
+
+void netpoll_rx_enable(struct net_device *dev)
+{
+       struct netpoll_info *ni;
+       rcu_read_lock();
+       ni = rcu_dereference(dev->npinfo);
+       if (ni)
+               mutex_unlock(&ni->dev_lock);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(netpoll_rx_enable);
+
 static void refill_skbs(void)
 {
        struct sk_buff *skb;
@@ -666,7 +707,7 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
                        icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
                        icmp6h->icmp6_router = 0;
                        icmp6h->icmp6_solicited = 1;
-                       target = (struct in6_addr *)skb_transport_header(send_skb) + sizeof(struct icmp6hdr);
+                       target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
                        *target = msg->target;
                        icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
                                                              IPPROTO_ICMPV6,
@@ -984,6 +1025,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
        np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+       INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
        if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
            !ndev->netdev_ops->ndo_poll_controller) {
@@ -1004,6 +1046,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
                INIT_LIST_HEAD(&npinfo->rx_np);
 
                spin_lock_init(&npinfo->rx_lock);
+               mutex_init(&npinfo->dev_lock);
                skb_queue_head_init(&npinfo->neigh_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
@@ -1017,7 +1060,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
                                goto free_npinfo;
                }
        } else {
-               npinfo = ndev->npinfo;
+               npinfo = rtnl_dereference(ndev->npinfo);
                atomic_inc(&npinfo->refcnt);
        }
 
@@ -1169,6 +1212,7 @@ EXPORT_SYMBOL(netpoll_setup);
 static int __init netpoll_init(void)
 {
        skb_queue_head_init(&skb_pool);
+       init_srcu_struct(&netpoll_srcu);
        return 0;
 }
 core_initcall(netpoll_init);
@@ -1196,7 +1240,11 @@ void __netpoll_cleanup(struct netpoll *np)
        struct netpoll_info *npinfo;
        unsigned long flags;
 
-       npinfo = np->dev->npinfo;
+       /* rtnl_dereference would be preferable here but
+        * rcu_cleanup_netpoll path can put us in here safely without
+        * holding the rtnl, so plain rcu_dereference it is
+        */
+       npinfo = rtnl_dereference(np->dev->npinfo);
        if (!npinfo)
                return;
 
@@ -1208,6 +1256,8 @@ void __netpoll_cleanup(struct netpoll *np)
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }
 
+       synchronize_srcu(&netpoll_srcu);
+
        if (atomic_dec_and_test(&npinfo->refcnt)) {
                const struct net_device_ops *ops;
 
@@ -1215,25 +1265,27 @@ void __netpoll_cleanup(struct netpoll *np)
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);
 
-               RCU_INIT_POINTER(np->dev->npinfo, NULL);
+               rcu_assign_pointer(np->dev->npinfo, NULL);
                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
        }
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+static void netpoll_async_cleanup(struct work_struct *work)
 {
-       struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
+       struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
 
+       rtnl_lock();
        __netpoll_cleanup(np);
+       rtnl_unlock();
        kfree(np);
 }
 
-void __netpoll_free_rcu(struct netpoll *np)
+void __netpoll_free_async(struct netpoll *np)
 {
-       call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+       schedule_work(&np->cleanup_work);
 }
-EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
+EXPORT_SYMBOL_GPL(__netpoll_free_async);
 
 void netpoll_cleanup(struct netpoll *np)
 {
index 5e67def..0777d0a 100644 (file)
@@ -69,10 +69,8 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
 
        /* allocate & copy */
        new = kzalloc(new_sz, GFP_KERNEL);
-       if (!new) {
-               pr_warn("Unable to alloc new priomap!\n");
+       if (!new)
                return -ENOMEM;
-       }
 
        if (old)
                memcpy(new->priomap, old->priomap,
index 7977695..6048fc1 100644 (file)
@@ -1790,10 +1790,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                        return -EFAULT;
                i += len;
                mutex_lock(&pktgen_thread_lock);
-               pktgen_add_device(t, f);
+               ret = pktgen_add_device(t, f);
                mutex_unlock(&pktgen_thread_lock);
-               ret = count;
-               sprintf(pg_result, "OK: add_device=%s", f);
+               if (!ret) {
+                       ret = count;
+                       sprintf(pg_result, "OK: add_device=%s", f);
+               } else
+                       sprintf(pg_result, "ERROR: can not add device %s", f);
                goto out;
        }
 
@@ -3647,7 +3650,7 @@ static int __net_init pg_net_init(struct net *net)
 remove_entry:
        remove_proc_entry(PGCTRL, pn->proc_dir);
 remove:
-       proc_net_remove(pn->net, PG_PROC_DIR);
+       remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
        return ret;
 }
 
@@ -3673,7 +3676,7 @@ static void __net_exit pg_net_exit(struct net *net)
        }
 
        remove_proc_entry(PGCTRL, pn->proc_dir);
-       proc_net_remove(pn->net, PG_PROC_DIR);
+       remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
 }
 
 static struct pernet_operations pg_net_ops = {
index c1e4db6..d8aa20f 100644 (file)
@@ -2119,13 +2119,17 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
-       struct nlattr *llattr;
+       struct nlattr *tb[NDA_MAX+1];
        struct net_device *dev;
        int err = -EINVAL;
        __u8 *addr;
 
-       if (nlmsg_len(nlh) < sizeof(*ndm))
-               return -EINVAL;
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+       if (err < 0)
+               return err;
 
        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex == 0) {
@@ -2139,13 +2143,17 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                return -ENODEV;
        }
 
-       llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
-       if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
-               pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n");
+       if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+               pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
+               return -EINVAL;
+       }
+
+       addr = nla_data(tb[NDA_LLADDR]);
+       if (!is_valid_ether_addr(addr)) {
+               pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n");
                return -EINVAL;
        }
 
-       addr = nla_data(llattr);
        err = -EOPNOTSUPP;
 
        /* Support fdb on master device the net/bridge default case */
@@ -2155,7 +2163,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                const struct net_device_ops *ops = br_dev->netdev_ops;
 
                if (ops->ndo_fdb_del)
-                       err = ops->ndo_fdb_del(ndm, dev, addr);
+                       err = ops->ndo_fdb_del(ndm, tb, dev, addr);
 
                if (err)
                        goto out;
@@ -2165,7 +2173,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        /* Embedded bridge, macvlan, and any other device support */
        if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
-               err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+               err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
 
                if (!err) {
                        rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
@@ -2315,6 +2323,13 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
        int idx = 0;
        u32 portid = NETLINK_CB(cb->skb).portid;
        u32 seq = cb->nlh->nlmsg_seq;
+       struct nlattr *extfilt;
+       u32 filter_mask = 0;
+
+       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+                                 IFLA_EXT_MASK);
+       if (extfilt)
+               filter_mask = nla_get_u32(extfilt);
 
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
@@ -2324,14 +2339,15 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
                if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
                        if (idx >= cb->args[0] &&
                            br_dev->netdev_ops->ndo_bridge_getlink(
-                                   skb, portid, seq, dev) < 0)
+                                   skb, portid, seq, dev, filter_mask) < 0)
                                break;
                        idx++;
                }
 
                if (ops->ndo_bridge_getlink) {
                        if (idx >= cb->args[0] &&
-                           ops->ndo_bridge_getlink(skb, portid, seq, dev) < 0)
+                           ops->ndo_bridge_getlink(skb, portid, seq, dev,
+                                                   filter_mask) < 0)
                                break;
                        idx++;
                }
@@ -2372,14 +2388,14 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
 
        if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
            br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
-               err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+               err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
                if (err < 0)
                        goto errout;
        }
 
        if ((flags & BRIDGE_FLAGS_SELF) &&
            dev->netdev_ops->ndo_bridge_getlink) {
-               err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+               err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
                if (err < 0)
                        goto errout;
        }
@@ -2464,6 +2480,77 @@ out:
        return err;
 }
 
+static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              void *arg)
+{
+       struct net *net = sock_net(skb->sk);
+       struct ifinfomsg *ifm;
+       struct net_device *dev;
+       struct nlattr *br_spec, *attr = NULL;
+       int rem, err = -EOPNOTSUPP;
+       u16 oflags, flags = 0;
+       bool have_flags = false;
+
+       if (nlmsg_len(nlh) < sizeof(*ifm))
+               return -EINVAL;
+
+       ifm = nlmsg_data(nlh);
+       if (ifm->ifi_family != AF_BRIDGE)
+               return -EPFNOSUPPORT;
+
+       dev = __dev_get_by_index(net, ifm->ifi_index);
+       if (!dev) {
+               pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
+               return -ENODEV;
+       }
+
+       br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (br_spec) {
+               nla_for_each_nested(attr, br_spec, rem) {
+                       if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               have_flags = true;
+                               flags = nla_get_u16(attr);
+                               break;
+                       }
+               }
+       }
+
+       oflags = flags;
+
+       if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
+               struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+
+               if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
+                       err = -EOPNOTSUPP;
+                       goto out;
+               }
+
+               err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+               if (err)
+                       goto out;
+
+               flags &= ~BRIDGE_FLAGS_MASTER;
+       }
+
+       if ((flags & BRIDGE_FLAGS_SELF)) {
+               if (!dev->netdev_ops->ndo_bridge_dellink)
+                       err = -EOPNOTSUPP;
+               else
+                       err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+
+               if (!err)
+                       flags &= ~BRIDGE_FLAGS_SELF;
+       }
+
+       if (have_flags)
+               memcpy(nla_data(attr), &flags, sizeof(flags));
+       /* Generate event to notify upper layer of bridge change */
+       if (!err)
+               err = rtnl_bridge_notify(dev, oflags);
+out:
+       return err;
+}
+
 /* Protected by RTNL sempahore.  */
 static struct rtattr **rta_buf;
 static int rtattr_max;
@@ -2647,6 +2734,7 @@ void __init rtnetlink_init(void)
        rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
 
        rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
+       rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
        rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
 }
 
index bddc1dd..2a3ca33 100644 (file)
@@ -104,47 +104,37 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
        .get = sock_pipe_buf_get,
 };
 
-/*
- *     Keep out-of-line to prevent kernel bloat.
- *     __builtin_return_address is not used because it is not always
- *     reliable.
- */
-
 /**
- *     skb_over_panic  -       private function
- *     @skb: buffer
- *     @sz: size
- *     @here: address
- *
- *     Out of line support code for skb_put(). Not user callable.
+ *     skb_panic - private function for out-of-line support
+ *     @skb:   buffer
+ *     @sz:    size
+ *     @addr:  address
+ *     @msg:   skb_over_panic or skb_under_panic
+ *
+ *     Out-of-line support for skb_put() and skb_push().
+ *     Called via the wrapper skb_over_panic() or skb_under_panic().
+ *     Keep out of line to prevent kernel bloat.
+ *     __builtin_return_address is not used because it is not always reliable.
  */
-static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
+                     const char msg[])
 {
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
-                __func__, here, skb->len, sz, skb->head, skb->data,
+                msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
 
-/**
- *     skb_under_panic -       private function
- *     @skb: buffer
- *     @sz: size
- *     @here: address
- *
- *     Out of line support code for skb_push(). Not user callable.
- */
-
-static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 {
-       pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
-                __func__, here, skb->len, sz, skb->head, skb->data,
-                (unsigned long)skb->tail, (unsigned long)skb->end,
-                skb->dev ? skb->dev->name : "<NULL>");
-       BUG();
+       skb_panic(skb, sz, addr, __func__);
 }
 
+static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
+{
+       skb_panic(skb, sz, addr, __func__);
+}
 
 /*
  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
@@ -351,10 +341,6 @@ struct netdev_alloc_cache {
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
 
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
-
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
        struct netdev_alloc_cache *nc;
@@ -686,7 +672,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        new->inner_transport_header = old->inner_transport_header;
-       new->inner_network_header = old->inner_transport_header;
+       new->inner_network_header = old->inner_network_header;
        skb_dst_copy(new, old);
        new->rxhash             = old->rxhash;
        new->ooo_okay           = old->ooo_okay;
@@ -2340,8 +2326,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
 {
        int pos = skb_headlen(skb);
 
-       skb_shinfo(skb1)->gso_type = skb_shinfo(skb)->gso_type;
-
+       skb_shinfo(skb)->tx_flags = skb_shinfo(skb1)->tx_flags & SKBTX_SHARED_FRAG;
        if (len < pos)  /* Split line is inside header. */
                skb_split_inside_header(skb, skb1, len, pos);
        else            /* Second chunk has no header, nothing to copy. */
@@ -2753,6 +2738,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int doffset = skb->data - skb_mac_header(skb);
        unsigned int offset = doffset;
+       unsigned int tnl_hlen = skb_tnl_header_len(skb);
        unsigned int headroom;
        unsigned int len;
        int sg = !!(features & NETIF_F_SG);
@@ -2829,7 +2815,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
                                          skb_network_header_len(skb));
-               skb_copy_from_linear_data(skb, nskb->data, doffset);
+
+               skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+                                                nskb->data - tnl_hlen,
+                                                doffset + tnl_hlen);
 
                if (fskb != skb_shinfo(skb)->frag_list)
                        continue;
@@ -2847,7 +2836,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                skb_copy_from_linear_data_offset(skb, offset,
                                                 skb_put(nskb, hsize), hsize);
 
-               skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+               skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
 
                while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
index f1e14e2..fe96c5d 100644 (file)
@@ -2836,7 +2836,7 @@ static const struct file_operations proto_seq_fops = {
 
 static __net_init int proto_init_net(struct net *net)
 {
-       if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
+       if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
                return -ENOMEM;
 
        return 0;
@@ -2844,7 +2844,7 @@ static __net_init int proto_init_net(struct net *net)
 
 static __net_exit void proto_exit_net(struct net *net)
 {
-       proc_net_remove(net, "protocols");
+       remove_proc_entry("protocols", net->proc_net);
 }
 
 
index 0a8d6eb..4c6bdf9 100644 (file)
@@ -171,7 +171,7 @@ static __init int dccpprobe_init(void)
        spin_lock_init(&dccpw.lock);
        if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
                return ret;
-       if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
+       if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
                goto err0;
 
        ret = setup_jprobe();
@@ -181,7 +181,7 @@ static __init int dccpprobe_init(void)
        pr_info("DCCP watch registered (port=%d)\n", port);
        return 0;
 err1:
-       proc_net_remove(&init_net, procname);
+       remove_proc_entry(procname, init_net.proc_net);
 err0:
        kfifo_free(&dccpw.fifo);
        return ret;
@@ -191,7 +191,7 @@ module_init(dccpprobe_init);
 static __exit void dccpprobe_exit(void)
 {
        kfifo_free(&dccpw.fifo);
-       proc_net_remove(&init_net, procname);
+       remove_proc_entry(procname, init_net.proc_net);
        unregister_jprobe(&dccp_send_probe);
 
 }
index 64d9843..c4a2def 100644 (file)
@@ -2384,7 +2384,7 @@ static int __init decnet_init(void)
        dev_add_pack(&dn_dix_packet_type);
        register_netdevice_notifier(&dn_dev_notifier);
 
-       proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops);
+       proc_create("decnet", S_IRUGO, init_net.proc_net, &dn_socket_seq_fops);
        dn_register_sysctl();
 out:
        return rc;
@@ -2413,7 +2413,7 @@ static void __exit decnet_exit(void)
        dn_neigh_cleanup();
        dn_fib_cleanup();
 
-       proc_net_remove(&init_net, "decnet");
+       remove_proc_entry("decnet", init_net.proc_net);
 
        proto_unregister(&dn_proto);
 
index e47ba9f..c8da116 100644 (file)
@@ -1412,7 +1412,7 @@ void __init dn_dev_init(void)
        rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL);
        rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL);
 
-       proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);
+       proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
 
 #ifdef CONFIG_SYSCTL
        {
@@ -1433,7 +1433,7 @@ void __exit dn_dev_cleanup(void)
        }
 #endif /* CONFIG_SYSCTL */
 
-       proc_net_remove(&init_net, "decnet_dev");
+       remove_proc_entry("decnet_dev", init_net.proc_net);
 
        dn_dev_devices_off();
 }
index 3aede1b..f8637f9 100644 (file)
@@ -95,7 +95,7 @@ static u32 dn_neigh_hash(const void *pkey,
 
 struct neigh_table dn_neigh_table = {
        .family =                       PF_DECnet,
-       .entry_size =                   sizeof(struct dn_neigh),
+       .entry_size =                   NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
        .key_len =                      sizeof(__le16),
        .hash =                         dn_neigh_hash,
        .constructor =                  dn_neigh_construct,
@@ -590,11 +590,12 @@ static const struct file_operations dn_neigh_seq_fops = {
 void __init dn_neigh_init(void)
 {
        neigh_table_init(&dn_neigh_table);
-       proc_net_fops_create(&init_net, "decnet_neigh", S_IRUGO, &dn_neigh_seq_fops);
+       proc_create("decnet_neigh", S_IRUGO, init_net.proc_net,
+                   &dn_neigh_seq_fops);
 }
 
 void __exit dn_neigh_cleanup(void)
 {
-       proc_net_remove(&init_net, "decnet_neigh");
+       remove_proc_entry("decnet_neigh", init_net.proc_net);
        neigh_table_clear(&dn_neigh_table);
 }
index 1550028..5ac0e15 100644 (file)
@@ -1901,7 +1901,8 @@ void __init dn_route_init(void)
 
        dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
 
-       proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
+       proc_create("decnet_cache", S_IRUGO, init_net.proc_net,
+                   &dn_rt_cache_seq_fops);
 
 #ifdef CONFIG_DECNET_ROUTER
        rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
@@ -1917,7 +1918,7 @@ void __exit dn_route_cleanup(void)
        del_timer(&dn_route_timer);
        dn_run_flush(0);
 
-       proc_net_remove(&init_net, "decnet_cache");
+       remove_proc_entry("decnet_cache", init_net.proc_net);
        dst_entries_destroy(&dn_dst_ops);
 }
 
index 09cba81..43b95ca 100644 (file)
@@ -377,17 +377,14 @@ static int lowpan_header_create(struct sk_buff *skb,
        struct ipv6hdr *hdr;
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
-       u8 *head;
+       u8 head[100];
        struct ieee802154_addr sa, da;
 
+       /* TODO:
+        * if this package isn't ipv6 one, where should it be routed?
+        */
        if (type != ETH_P_IPV6)
                return 0;
-               /* TODO:
-                * if this package isn't ipv6 one, where should it be routed?
-                */
-       head = kzalloc(100, GFP_KERNEL);
-       if (head == NULL)
-               return -ENOMEM;
 
        hdr = ipv6_hdr(skb);
        hc06_ptr = head + 2;
@@ -561,8 +558,6 @@ static int lowpan_header_create(struct sk_buff *skb,
        skb_pull(skb, sizeof(struct ipv6hdr));
        memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
 
-       kfree(head);
-
        lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
                                skb->len);
 
@@ -1267,7 +1262,7 @@ static inline int __init lowpan_netlink_init(void)
        return rtnl_link_register(&lowpan_link_ops);
 }
 
-static inline void __init lowpan_netlink_fini(void)
+static inline void lowpan_netlink_fini(void)
 {
        rtnl_link_unregister(&lowpan_link_ops);
 }
index 49ddca3..e225a4e 100644 (file)
@@ -263,21 +263,6 @@ void build_ehash_secret(void)
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
-static inline int inet_netns_ok(struct net *net, __u8 protocol)
-{
-       const struct net_protocol *ipprot;
-
-       if (net_eq(net, &init_net))
-               return 1;
-
-       ipprot = rcu_dereference(inet_protos[protocol]);
-       if (ipprot == NULL) {
-               /* raw IP is OK */
-               return 1;
-       }
-       return ipprot->netns_ok;
-}
-
 /*
  *     Create an inet socket.
  */
@@ -350,10 +335,6 @@ lookup_protocol:
            !ns_capable(net->user_ns, CAP_NET_RAW))
                goto out_rcu_unlock;
 
-       err = -EAFNOSUPPORT;
-       if (!inet_netns_ok(net, protocol))
-               goto out_rcu_unlock;
-
        sock->ops = answer->ops;
        answer_prot = answer->prot;
        answer_no_check = answer->no_check;
@@ -1306,7 +1287,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                       SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
-                      SKB_GSO_SHARED_FRAG |
+                      SKB_GSO_GRE |
                       0)))
                goto out;
 
index a69b4e4..2e7f194 100644 (file)
@@ -321,8 +321,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
        /* We are going to _remove_ AH header to keep sockets happy,
         * so... Later this can change. */
-       if (skb_cloned(skb) &&
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                goto out;
 
        skb->ip_summed = CHECKSUM_NONE;
index 9547a27..fea4929 100644 (file)
@@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)
 static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct arphdr *arp;
+       const struct arphdr *arp;
+
+       if (dev->flags & IFF_NOARP ||
+           skb->pkt_type == PACKET_OTHERHOST ||
+           skb->pkt_type == PACKET_LOOPBACK)
+               goto freeskb;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto out_of_mem;
 
        /* ARP header, plus 2 device addresses, plus 2 IP addresses.  */
        if (!pskb_may_pull(skb, arp_hdr_len(dev)))
                goto freeskb;
 
        arp = arp_hdr(skb);
-       if (arp->ar_hln != dev->addr_len ||
-           dev->flags & IFF_NOARP ||
-           skb->pkt_type == PACKET_OTHERHOST ||
-           skb->pkt_type == PACKET_LOOPBACK ||
-           arp->ar_pln != 4)
+       if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
                goto freeskb;
 
-       skb = skb_share_check(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               goto out_of_mem;
-
        memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 
        return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
@@ -1404,14 +1405,14 @@ static const struct file_operations arp_seq_fops = {
 
 static int __net_init arp_net_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "arp", S_IRUGO, &arp_seq_fops))
+       if (!proc_create("arp", S_IRUGO, net->proc_net, &arp_seq_fops))
                return -ENOMEM;
        return 0;
 }
 
 static void __net_exit arp_net_exit(struct net *net)
 {
-       proc_net_remove(net, "arp");
+       remove_proc_entry("arp", net->proc_net);
 }
 
 static struct pernet_operations arp_net_ops = {
index 31d771c..61e03da 100644 (file)
@@ -2607,31 +2607,31 @@ static const struct file_operations fib_route_fops = {
 
 int __net_init fib_proc_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
+       if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
                goto out1;
 
-       if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
-                                 &fib_triestat_fops))
+       if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
+                        &fib_triestat_fops))
                goto out2;
 
-       if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
+       if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
                goto out3;
 
        return 0;
 
 out3:
-       proc_net_remove(net, "fib_triestat");
+       remove_proc_entry("fib_triestat", net->proc_net);
 out2:
-       proc_net_remove(net, "fib_trie");
+       remove_proc_entry("fib_trie", net->proc_net);
 out1:
        return -ENOMEM;
 }
 
 void __net_exit fib_proc_exit(struct net *net)
 {
-       proc_net_remove(net, "fib_trie");
-       proc_net_remove(net, "fib_triestat");
-       proc_net_remove(net, "route");
+       remove_proc_entry("fib_trie", net->proc_net);
+       remove_proc_entry("fib_triestat", net->proc_net);
+       remove_proc_entry("route", net->proc_net);
 }
 
 #endif /* CONFIG_PROC_FS */
index 42a4910..7a4c710 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/netdevice.h>
+#include <linux/if_tunnel.h>
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
 
 static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
 static DEFINE_SPINLOCK(gre_proto_lock);
+struct gre_base_hdr {
+       __be16 flags;
+       __be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 {
@@ -112,12 +118,117 @@ static void gre_err(struct sk_buff *skb, u32 info)
        rcu_read_unlock();
 }
 
+static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
+{
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       netdev_features_t enc_features;
+       int ghl = GRE_HEADER_SECTION;
+       struct gre_base_hdr *greh;
+       int mac_len = skb->mac_len;
+       int tnl_hlen;
+       bool csum;
+
+       if (unlikely(skb_shinfo(skb)->gso_type &
+                               ~(SKB_GSO_TCPV4 |
+                                 SKB_GSO_TCPV6 |
+                                 SKB_GSO_UDP |
+                                 SKB_GSO_DODGY |
+                                 SKB_GSO_TCP_ECN |
+                                 SKB_GSO_GRE)))
+               goto out;
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
+               goto out;
+
+       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+
+       if (greh->flags & GRE_KEY)
+               ghl += GRE_HEADER_SECTION;
+       if (greh->flags & GRE_SEQ)
+               ghl += GRE_HEADER_SECTION;
+       if (greh->flags & GRE_CSUM) {
+               ghl += GRE_HEADER_SECTION;
+               csum = true;
+       } else
+               csum = false;
+
+       /* setup inner skb. */
+       if (greh->protocol == htons(ETH_P_TEB)) {
+               struct ethhdr *eth = eth_hdr(skb);
+               skb->protocol = eth->h_proto;
+       } else {
+               skb->protocol = greh->protocol;
+       }
+
+       skb->encapsulation = 0;
+
+       if (unlikely(!pskb_may_pull(skb, ghl)))
+               goto out;
+       __skb_pull(skb, ghl);
+       skb_reset_mac_header(skb);
+       skb_set_network_header(skb, skb_inner_network_offset(skb));
+       skb->mac_len = skb_inner_network_offset(skb);
+
+       /* segment inner packet. */
+       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       segs = skb_mac_gso_segment(skb, enc_features);
+       if (!segs || IS_ERR(segs))
+               goto out;
+
+       skb = segs;
+       tnl_hlen = skb_tnl_header_len(skb);
+       do {
+               __skb_push(skb, ghl);
+               if (csum) {
+                       __be32 *pcsum;
+
+                       if (skb_has_shared_frag(skb)) {
+                               int err;
+
+                               err = __skb_linearize(skb);
+                               if (err) {
+                                       kfree_skb(segs);
+                                       segs = ERR_PTR(err);
+                                       goto out;
+                               }
+                       }
+
+                       greh = (struct gre_base_hdr *)(skb->data);
+                       pcsum = (__be32 *)(greh + 1);
+                       *pcsum = 0;
+                       *(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
+               }
+               __skb_push(skb, tnl_hlen - ghl);
+
+               skb_reset_mac_header(skb);
+               skb_set_network_header(skb, mac_len);
+               skb->mac_len = mac_len;
+       } while ((skb = skb->next));
+out:
+       return segs;
+}
+
+static int gre_gso_send_check(struct sk_buff *skb)
+{
+       if (!skb->encapsulation)
+               return -EINVAL;
+       return 0;
+}
+
 static const struct net_protocol net_gre_protocol = {
        .handler     = gre_rcv,
        .err_handler = gre_err,
        .netns_ok    = 1,
 };
 
+static const struct net_offload gre_offload = {
+       .callbacks = {
+               .gso_send_check =       gre_gso_send_check,
+               .gso_segment    =       gre_gso_segment,
+       },
+};
+
 static int __init gre_init(void)
 {
        pr_info("GRE over IPv4 demultiplexor driver\n");
@@ -127,11 +238,18 @@ static int __init gre_init(void)
                return -EAGAIN;
        }
 
+       if (inet_add_offload(&gre_offload, IPPROTO_GRE)) {
+               pr_err("can't add protocol offload\n");
+               inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+               return -EAGAIN;
+       }
+
        return 0;
 }
 
 static void __exit gre_exit(void)
 {
+       inet_del_offload(&gre_offload, IPPROTO_GRE);
        inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
 }
 
index 736ab70..d8c2327 100644 (file)
@@ -2646,24 +2646,25 @@ static int __net_init igmp_net_init(struct net *net)
 {
        struct proc_dir_entry *pde;
 
-       pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
+       pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
        if (!pde)
                goto out_igmp;
-       pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
+       pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
+                         &igmp_mcf_seq_fops);
        if (!pde)
                goto out_mcfilter;
        return 0;
 
 out_mcfilter:
-       proc_net_remove(net, "igmp");
+       remove_proc_entry("igmp", net->proc_net);
 out_igmp:
        return -ENOMEM;
 }
 
 static void __net_exit igmp_net_exit(struct net *net)
 {
-       proc_net_remove(net, "mcfilter");
-       proc_net_remove(net, "igmp");
+       remove_proc_entry("mcfilter", net->proc_net);
+       remove_proc_entry("igmp", net->proc_net);
 }
 
 static struct pernet_operations igmp_net_ops = {
index 1211613..b6d30ac 100644 (file)
@@ -590,7 +590,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                goto out_oversize;
 
        /* Head of list must not be cloned. */
-       if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(head, GFP_ATOMIC))
                goto out_nomem;
 
        /* If the first fragment is fragmented itself, we split
index 00a14b9..a56f118 100644 (file)
@@ -735,8 +735,33 @@ drop:
        return 0;
 }
 
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
+{
+       int err;
+
+       if (skb_is_gso(skb)) {
+               err = skb_unclone(skb, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto error;
+               skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
+               return skb;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               err = skb_checksum_help(skb);
+               if (unlikely(err))
+                       goto error;
+       }
+       skb->ip_summed = CHECKSUM_NONE;
+
+       return skb;
+
+error:
+       kfree_skb(skb);
+       return ERR_PTR(err);
+}
+
 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr  *old_iph;
        const struct iphdr  *tiph;
@@ -751,10 +776,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        __be32 dst;
        int    mtu;
        u8     ttl;
+       int    err;
+       int    pkt_len;
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL &&
-           skb_checksum_help(skb))
-               goto tx_error;
+       skb = handle_offloads(skb);
+       if (IS_ERR(skb)) {
+               dev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       if (!skb->encapsulation) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
 
        old_iph = ip_hdr(skb);
 
@@ -855,7 +889,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if (skb->protocol == htons(ETH_P_IP)) {
                df |= (old_iph->frag_off&htons(IP_DF));
 
-               if ((old_iph->frag_off&htons(IP_DF)) &&
+               if (!skb_is_gso(skb) &&
+                   (old_iph->frag_off&htons(IP_DF)) &&
                    mtu < ntohs(old_iph->tot_len)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        ip_rt_put(rt);
@@ -875,7 +910,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        }
                }
 
-               if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
+               if (!skb_is_gso(skb) &&
+                   mtu >= IPV6_MIN_MTU &&
+                   mtu < skb->len - tunnel->hlen + gre_hlen) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        ip_rt_put(rt);
                        goto tx_error;
@@ -936,6 +973,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        iph->daddr              =       fl4.daddr;
        iph->saddr              =       fl4.saddr;
        iph->ttl                =       ttl;
+       iph->id                 =       0;
 
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
@@ -964,9 +1002,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        *ptr = tunnel->parms.o_key;
                        ptr--;
                }
-               if (tunnel->parms.o_flags&GRE_CSUM) {
+               /* Skip GRE checksum if skb is getting offloaded. */
+               if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
+                   (tunnel->parms.o_flags&GRE_CSUM)) {
                        int offset = skb_transport_offset(skb);
 
+                       if (skb_has_shared_frag(skb)) {
+                               err = __skb_linearize(skb);
+                               if (err) {
+                                       ip_rt_put(rt);
+                                       goto tx_error;
+                               }
+                       }
+
                        *ptr = 0;
                        *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
                                                                 skb->len - offset,
@@ -974,7 +1022,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                }
        }
 
-       iptunnel_xmit(skb, dev);
+       nf_reset(skb);
+
+       pkt_len = skb->len - skb_transport_offset(skb);
+       err = ip_local_out(skb);
+       if (likely(net_xmit_eval(err) == 0)) {
+               u64_stats_update_begin(&tstats->syncp);
+               tstats->tx_bytes += pkt_len;
+               tstats->tx_packets++;
+               u64_stats_update_end(&tstats->syncp);
+       } else {
+               dev->stats.tx_errors++;
+               dev->stats.tx_aborted_errors++;
+       }
        return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1044,6 +1104,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
                mtu = 68;
 
        tunnel->hlen = addend;
+       /* TCP offload with GRE SEQ is not supported. */
+       if (!(tunnel->parms.o_flags & GRE_SEQ)) {
+               dev->features           |= NETIF_F_GSO_SOFTWARE;
+               dev->hw_features        |= NETIF_F_GSO_SOFTWARE;
+       }
 
        return mtu;
 }
@@ -1593,6 +1658,9 @@ static void ipgre_tap_setup(struct net_device *dev)
 
        dev->iflink             = 0;
        dev->features           |= NETIF_F_NETNS_LOCAL;
+
+       dev->features           |= GRE_FEATURES;
+       dev->hw_features        |= GRE_FEATURES;
 }
 
 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
index f1395a6..87abd3e 100644 (file)
@@ -208,13 +208,6 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                if (ipprot != NULL) {
                        int ret;
 
-                       if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
-                               net_info_ratelimited("%s: proto %d isn't netns-ready\n",
-                                                    __func__, protocol);
-                               kfree_skb(skb);
-                               goto out;
-                       }
-
                        if (!ipprot->no_policy) {
                                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                        kfree_skb(skb);
index 3e98ed2..5e12dca 100644 (file)
@@ -598,6 +598,7 @@ slow_path:
        /* for offloaded checksums cleanup checksum before fragmentation */
        if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
                goto fail;
+       iph = ip_hdr(skb);
 
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */
index a2e50ae..98cbc68 100644 (file)
@@ -1394,7 +1394,7 @@ static int __init ip_auto_config(void)
        unsigned int i;
 
 #ifdef CONFIG_PROC_FS
-       proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
+       proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
 #endif /* CONFIG_PROC_FS */
 
        if (!ic_enable)
index 7085b9b..5f95b3a 100644 (file)
@@ -2703,16 +2703,16 @@ static int __net_init ipmr_net_init(struct net *net)
 
 #ifdef CONFIG_PROC_FS
        err = -ENOMEM;
-       if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
+       if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
                goto proc_vif_fail;
-       if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
+       if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
                goto proc_cache_fail;
 #endif
        return 0;
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
-       proc_net_remove(net, "ip_mr_vif");
+       remove_proc_entry("ip_mr_vif", net->proc_net);
 proc_vif_fail:
        ipmr_rules_exit(net);
 #endif
@@ -2723,8 +2723,8 @@ fail:
 static void __net_exit ipmr_net_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ip_mr_cache");
-       proc_net_remove(net, "ip_mr_vif");
+       remove_proc_entry("ip_mr_cache", net->proc_net);
+       remove_proc_entry("ip_mr_vif", net->proc_net);
 #endif
        ipmr_rules_exit(net);
 }
index 9682b36..f2ca127 100644 (file)
@@ -417,12 +417,12 @@ static int __net_init ip_conntrack_net_init(struct net *net)
 {
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;
 
-       proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
+       proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
        if (!proc)
                goto err1;
 
-       proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
-                                       &ip_exp_file_ops);
+       proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
+                              &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;
 
@@ -433,9 +433,9 @@ static int __net_init ip_conntrack_net_init(struct net *net)
        return 0;
 
 err3:
-       proc_net_remove(net, "ip_conntrack_expect");
+       remove_proc_entry("ip_conntrack_expect", net->proc_net);
 err2:
-       proc_net_remove(net, "ip_conntrack");
+       remove_proc_entry("ip_conntrack", net->proc_net);
 err1:
        return -ENOMEM;
 }
@@ -443,8 +443,8 @@ err1:
 static void __net_exit ip_conntrack_net_exit(struct net *net)
 {
        remove_proc_entry("ip_conntrack", net->proc_net_stat);
-       proc_net_remove(net, "ip_conntrack_expect");
-       proc_net_remove(net, "ip_conntrack");
+       remove_proc_entry("ip_conntrack_expect", net->proc_net);
+       remove_proc_entry("ip_conntrack", net->proc_net);
 }
 
 static struct pernet_operations ip_conntrack_net_ops = {
index 6f9c072..55c4ee1 100644 (file)
@@ -889,7 +889,7 @@ static int ping_proc_register(struct net *net)
        struct proc_dir_entry *p;
        int rc = 0;
 
-       p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops);
+       p = proc_create("icmp", S_IRUGO, net->proc_net, &ping_seq_fops);
        if (!p)
                rc = -ENOMEM;
        return rc;
@@ -897,7 +897,7 @@ static int ping_proc_register(struct net *net)
 
 static void ping_proc_unregister(struct net *net)
 {
-       proc_net_remove(net, "icmp");
+       remove_proc_entry("icmp", net->proc_net);
 }
 
 
index 8de53e1..32030a2 100644 (file)
@@ -471,28 +471,29 @@ static const struct file_operations netstat_seq_fops = {
 
 static __net_init int ip_proc_init_net(struct net *net)
 {
-       if (!proc_net_fops_create(net, "sockstat", S_IRUGO, &sockstat_seq_fops))
+       if (!proc_create("sockstat", S_IRUGO, net->proc_net,
+                        &sockstat_seq_fops))
                goto out_sockstat;
-       if (!proc_net_fops_create(net, "netstat", S_IRUGO, &netstat_seq_fops))
+       if (!proc_create("netstat", S_IRUGO, net->proc_net, &netstat_seq_fops))
                goto out_netstat;
-       if (!proc_net_fops_create(net, "snmp", S_IRUGO, &snmp_seq_fops))
+       if (!proc_create("snmp", S_IRUGO, net->proc_net, &snmp_seq_fops))
                goto out_snmp;
 
        return 0;
 
 out_snmp:
-       proc_net_remove(net, "netstat");
+       remove_proc_entry("netstat", net->proc_net);
 out_netstat:
-       proc_net_remove(net, "sockstat");
+       remove_proc_entry("sockstat", net->proc_net);
 out_sockstat:
        return -ENOMEM;
 }
 
 static __net_exit void ip_proc_exit_net(struct net *net)
 {
-       proc_net_remove(net, "snmp");
-       proc_net_remove(net, "netstat");
-       proc_net_remove(net, "sockstat");
+       remove_proc_entry("snmp", net->proc_net);
+       remove_proc_entry("netstat", net->proc_net);
+       remove_proc_entry("sockstat", net->proc_net);
 }
 
 static __net_initdata struct pernet_operations ip_proc_ops = {
index 0f9d09f..ce84846 100644 (file)
@@ -37,6 +37,12 @@ const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
 int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
+       if (!prot->netns_ok) {
+               pr_err("Protocol %u is not namespace aware, cannot register.\n",
+                       protocol);
+               return -EINVAL;
+       }
+
        return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
                        NULL, prot) ? 0 : -1;
 }
index 6f08991..53ddebc 100644 (file)
@@ -1050,7 +1050,7 @@ static const struct file_operations raw_seq_fops = {
 
 static __net_init int raw_init_net(struct net *net)
 {
-       if (!proc_net_fops_create(net, "raw", S_IRUGO, &raw_seq_fops))
+       if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops))
                return -ENOMEM;
 
        return 0;
@@ -1058,7 +1058,7 @@ static __net_init int raw_init_net(struct net *net)
 
 static __net_exit void raw_exit_net(struct net *net)
 {
-       proc_net_remove(net, "raw");
+       remove_proc_entry("raw", net->proc_net);
 }
 
 static __net_initdata struct pernet_operations raw_net_ops = {
index a0fcc47..3bdd1b1 100644 (file)
@@ -384,8 +384,8 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
 {
        struct proc_dir_entry *pde;
 
-       pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
-                       &rt_cache_seq_fops);
+       pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
+                         &rt_cache_seq_fops);
        if (!pde)
                goto err1;
 
index 2622707..960fd29 100644 (file)
@@ -633,13 +633,6 @@ static struct ctl_table ipv4_table[] = {
                .proc_handler   = proc_tcp_congestion_control,
        },
        {
-               .procname       = "tcp_abc",
-               .data           = &sysctl_tcp_abc,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
                .procname       = "tcp_mtu_probing",
                .data           = &sysctl_tcp_mtu_probing,
                .maxlen         = sizeof(int),
index 3ec1f69..7a5ba48 100644 (file)
@@ -400,6 +400,8 @@ void tcp_init_sock(struct sock *sk)
        tcp_enable_early_retrans(tp);
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
+       tp->tsoffset = 0;
+
        sk->sk_state = TCP_CLOSE;
 
        sk->sk_write_space = sk_stream_write_space;
@@ -895,8 +897,7 @@ new_segment:
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }
-
-               skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
+               skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 
                skb->len += copy;
                skb->data_len += copy;
@@ -2289,7 +2290,6 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->packets_out = 0;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_cnt = 0;
-       tp->bytes_acked = 0;
        tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
@@ -2713,6 +2713,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                else
                        err = -EINVAL;
                break;
+       case TCP_TIMESTAMP:
+               if (!tp->repair)
+                       err = -EPERM;
+               else
+                       tp->tsoffset = val - tcp_time_stamp;
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
@@ -2961,6 +2967,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
        case TCP_USER_TIMEOUT:
                val = jiffies_to_msecs(icsk->icsk_user_timeout);
                break;
+       case TCP_TIMESTAMP:
+               val = tcp_time_stamp + tp->tsoffset;
+               break;
        default:
                return -ENOPROTOOPT;
        }
@@ -3034,7 +3043,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
-                              SKB_GSO_SHARED_FRAG |
+                              SKB_GSO_GRE |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
                        goto out;
index 291f2ed..019c238 100644 (file)
@@ -310,35 +310,24 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
        int cnt; /* increase in packets */
        unsigned int delta = 0;
+       u32 snd_cwnd = tp->snd_cwnd;
 
-       /* RFC3465: ABC Slow start
-        * Increase only after a full MSS of bytes is acked
-        *
-        * TCP sender SHOULD increase cwnd by the number of
-        * previously unacknowledged bytes ACKed by each incoming
-        * acknowledgment, provided the increase is not more than L
-        */
-       if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
-               return;
+       if (unlikely(!snd_cwnd)) {
+               pr_err_once("snd_cwnd is nul, please report this bug.\n");
+               snd_cwnd = 1U;
+       }
 
        if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                cnt = sysctl_tcp_max_ssthresh >> 1;     /* limited slow start */
        else
-               cnt = tp->snd_cwnd;                     /* exponential increase */
-
-       /* RFC3465: ABC
-        * We MAY increase by 2 if discovered delayed ack
-        */
-       if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
-               cnt <<= 1;
-       tp->bytes_acked = 0;
+               cnt = snd_cwnd;                         /* exponential increase */
 
        tp->snd_cwnd_cnt += cnt;
-       while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-               tp->snd_cwnd_cnt -= tp->snd_cwnd;
+       while (tp->snd_cwnd_cnt >= snd_cwnd) {
+               tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
-       tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+       tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
@@ -372,20 +361,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
        /* In "safe" area, increase. */
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp);
-
        /* In dangerous area, increase slowly. */
-       else if (sysctl_tcp_abc) {
-               /* RFC3465: Appropriate Byte Count
-                * increase once for each full cwnd acked
-                */
-               if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
-                       tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-               }
-       } else {
+       else
                tcp_cong_avoid_ai(tp, tp->snd_cwnd);
-       }
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
index 492c7cf..a759e19 100644 (file)
@@ -98,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
-int sysctl_tcp_abc __read_mostly;
 int sysctl_tcp_early_retrans __read_mostly = 2;
 
 #define FLAG_DATA              0x01 /* Incoming frame contained data.          */
@@ -1240,13 +1239,13 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
         */
        if (!skb_shinfo(prev)->gso_size) {
                skb_shinfo(prev)->gso_size = mss;
-               skb_shinfo(prev)->gso_type |= sk->sk_gso_type;
+               skb_shinfo(prev)->gso_type = sk->sk_gso_type;
        }
 
        /* CHECKME: To clear or not to clear? Mimics normal skb currently */
        if (skb_shinfo(skb)->gso_segs <= 1) {
                skb_shinfo(skb)->gso_size = 0;
-               skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
+               skb_shinfo(skb)->gso_type = 0;
        }
 
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
@@ -2007,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
        tp->snd_cwnd_cnt = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->frto_counter = 0;
-       tp->bytes_acked = 0;
 
        tp->reordering = min_t(unsigned int, tp->reordering,
                               sysctl_tcp_reordering);
@@ -2056,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
        tp->snd_cwnd_cnt   = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
 
-       tp->bytes_acked = 0;
        tcp_clear_retrans_partial(tp);
 
        if (tcp_is_reno(tp))
@@ -2684,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
        struct tcp_sock *tp = tcp_sk(sk);
 
        tp->high_seq = tp->snd_nxt;
-       tp->bytes_acked = 0;
        tp->snd_cwnd_cnt = 0;
        tp->prior_cwnd = tp->snd_cwnd;
        tp->prr_delivered = 0;
@@ -2735,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
        struct tcp_sock *tp = tcp_sk(sk);
 
        tp->prior_ssthresh = 0;
-       tp->bytes_acked = 0;
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
                tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3417,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 {
        tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
        tp->snd_cwnd_cnt = 0;
-       tp->bytes_acked = 0;
        TCP_ECN_queue_cwr(tp);
        tcp_moderate_cwnd(tp);
 }
@@ -3502,6 +3496,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
                }
        } else {
                if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       if (!tcp_packets_in_flight(tp)) {
+                               tcp_enter_frto_loss(sk, 2, flag);
+                               return true;
+                       }
+
                        /* Prevent sending of new data. */
                        tp->snd_cwnd = min(tp->snd_cwnd,
                                           tcp_packets_in_flight(tp));
@@ -3608,15 +3607,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (after(ack, prior_snd_una))
                flag |= FLAG_SND_UNA_ADVANCED;
 
-       if (sysctl_tcp_abc) {
-               if (icsk->icsk_ca_state < TCP_CA_CWR)
-                       tp->bytes_acked += ack - prior_snd_una;
-               else if (icsk->icsk_ca_state == TCP_CA_Loss)
-                       /* we assume just one segment left network */
-                       tp->bytes_acked += min(ack - prior_snd_una,
-                                              tp->mss_cache);
-       }
-
        prior_fackets = tp->fackets_out;
        prior_in_flight = tcp_packets_in_flight(tp);
 
@@ -3870,7 +3860,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
                ++ptr;
                tp->rx_opt.rcv_tsval = ntohl(*ptr);
                ++ptr;
-               tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+               tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
                return true;
        }
        return false;
@@ -3894,7 +3884,11 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
                if (tcp_parse_aligned_timestamp(tp, th))
                        return true;
        }
+
        tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+       if (tp->rx_opt.saw_tstamp)
+               tp->rx_opt.rcv_tsecr -= tp->tsoffset;
+
        return true;
 }
 
@@ -5647,8 +5641,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
         * the remote receives only the retransmitted (regular) SYNs: either
         * the original SYN-data or the corresponding SYN-ACK is lost.
         */
-       syn_drop = (cookie->len <= 0 && data &&
-                   inet_csk(sk)->icsk_retransmits);
+       syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
        tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
@@ -5676,6 +5669,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
        int saved_clamp = tp->rx_opt.mss_clamp;
 
        tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
+       if (tp->rx_opt.saw_tstamp)
+               tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
        if (th->ack) {
                /* rfc793:
index 5a1cfc6..145d3bf 100644 (file)
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -725,7 +726,7 @@ release_sk1:
  */
 
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
-                           u32 win, u32 ts, int oif,
+                           u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
 {
@@ -746,12 +747,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
-       if (ts) {
+       if (tsecr) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
-               rep.opt[1] = htonl(tcp_time_stamp);
-               rep.opt[2] = htonl(ts);
+               rep.opt[1] = htonl(tsval);
+               rep.opt[2] = htonl(tsecr);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }
 
@@ -766,7 +767,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 #ifdef CONFIG_TCP_MD5SIG
        if (key) {
-               int offset = (ts) ? 3 : 0;
+               int offset = (tsecr) ? 3 : 0;
 
                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
@@ -801,6 +802,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+                       tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
@@ -820,6 +822,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
        tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+                       tcp_time_stamp,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -1501,8 +1504,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
@@ -1667,6 +1672,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -2610,7 +2616,7 @@ EXPORT_SYMBOL(tcp_proc_register);
 
 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 {
-       proc_net_remove(net, afinfo->name);
+       remove_proc_entry(afinfo->name, net->proc_net);
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
index f35f2df..b83a49c 100644 (file)
@@ -102,6 +102,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
 
                if (tmp_opt.saw_tstamp) {
+                       tmp_opt.rcv_tsecr       -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
@@ -288,6 +289,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+               tcptw->tw_ts_offset     = tp->tsoffset;
 
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
@@ -446,7 +448,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                 */
                newtp->snd_cwnd = TCP_INIT_CWND;
                newtp->snd_cwnd_cnt = 0;
-               newtp->bytes_acked = 0;
 
                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;
@@ -500,6 +501,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                        newtp->rx_opt.ts_recent_stamp = 0;
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
+               newtp->tsoffset = 0;
 #ifdef CONFIG_TCP_MD5SIG
                newtp->md5sig_info = NULL;      /*XXX*/
                if (newtp->af_specific->md5_lookup(sk, newsk))
index 367e2ec..fd0cea1 100644 (file)
@@ -622,7 +622,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
        if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
                opts->options |= OPTION_TS;
-               opts->tsval = TCP_SKB_CB(skb)->when;
+               opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
                opts->tsecr = tp->rx_opt.ts_recent;
                remaining -= TCPOLEN_TSTAMP_ALIGNED;
        }
@@ -806,7 +806,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 
        if (likely(tp->rx_opt.tstamp_ok)) {
                opts->options |= OPTION_TS;
-               opts->tsval = tcb ? tcb->when : 0;
+               opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
                opts->tsecr = tp->rx_opt.ts_recent;
                size += TCPOLEN_TSTAMP_ALIGNED;
        }
@@ -1133,7 +1133,6 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
 {
-       skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
        if (skb->len <= mss_now || !sk_can_gso(sk) ||
            skb->ip_summed == CHECKSUM_NONE) {
                /* Avoid the costly divide in the normal
@@ -1141,10 +1140,11 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                 */
                skb_shinfo(skb)->gso_segs = 1;
                skb_shinfo(skb)->gso_size = 0;
+               skb_shinfo(skb)->gso_type = 0;
        } else {
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
                skb_shinfo(skb)->gso_size = mss_now;
-               skb_shinfo(skb)->gso_type |= sk->sk_gso_type;
+               skb_shinfo(skb)->gso_type = sk->sk_gso_type;
        }
 }
 
@@ -1331,7 +1331,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 /* Remove acked data from a packet in the transmit queue. */
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
-       if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;
 
        __pskb_trim_head(skb, len);
index 4526fe6..d4943f6 100644 (file)
@@ -234,7 +234,7 @@ static __init int tcpprobe_init(void)
        if (!tcp_probe.log)
                goto err0;
 
-       if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &tcpprobe_fops))
+       if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
                goto err0;
 
        ret = register_jprobe(&tcp_jprobe);
@@ -244,7 +244,7 @@ static __init int tcpprobe_init(void)
        pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
        return 0;
  err1:
-       proc_net_remove(&init_net, procname);
+       remove_proc_entry(procname, init_net.proc_net);
  err0:
        kfree(tcp_probe.log);
        return ret;
@@ -253,7 +253,7 @@ module_init(tcpprobe_init);
 
 static __exit void tcpprobe_exit(void)
 {
-       proc_net_remove(&init_net, procname);
+       remove_proc_entry(procname, init_net.proc_net);
        unregister_jprobe(&tcp_jprobe);
        kfree(tcp_probe.log);
 }
index 6791aac..265c42c 100644 (file)
@@ -2122,7 +2122,7 @@ EXPORT_SYMBOL(udp_proc_register);
 
 void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
 {
-       proc_net_remove(net, afinfo->name);
+       remove_proc_entry(afinfo->name, net->proc_net);
 }
 EXPORT_SYMBOL(udp_proc_unregister);
 
@@ -2305,7 +2305,8 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;
 
-               if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
+               if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
+                                     SKB_GSO_GRE) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
 
index 06814b6..1f12c8b 100644 (file)
@@ -132,7 +132,7 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
         * header and optional ESP marker bytes) and then modify the
         * protocol to ESP, and then call into the transform receiver.
         */
-       if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                goto drop;
 
        /* Now we can update and verify the packet length... */
index ddee0a0..fe5189e 100644 (file)
@@ -142,8 +142,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        for_each_input_rcu(rcv_notify_handlers, handler)
                handler->handler(skb);
 
-       if (skb_cloned(skb) &&
-           (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+       err = skb_unclone(skb, GFP_ATOMIC);
+       if (err)
                goto out;
 
        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
index 3be0ac2..9a459be 100644 (file)
@@ -262,21 +262,56 @@ static struct ctl_table xfrm4_policy_table[] = {
        { }
 };
 
-static struct ctl_table_header *sysctl_hdr;
-#endif
-
-static void __init xfrm4_policy_init(void)
+static int __net_init xfrm4_net_init(struct net *net)
 {
-       xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+
+       table = xfrm4_policy_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
+               if (!table)
+                       goto err_alloc;
+
+               table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
+       }
+
+       hdr = register_net_sysctl(net, "net/ipv4", table);
+       if (!hdr)
+               goto err_reg;
+
+       net->ipv4.xfrm4_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
 }
 
-static void __exit xfrm4_policy_fini(void)
+static void __net_exit xfrm4_net_exit(struct net *net)
 {
-#ifdef CONFIG_SYSCTL
-       if (sysctl_hdr)
-               unregister_net_sysctl_table(sysctl_hdr);
+       struct ctl_table *table;
+
+       if (net->ipv4.xfrm4_hdr == NULL)
+               return;
+
+       table = net->ipv4.xfrm4_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->ipv4.xfrm4_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+}
+
+static struct pernet_operations __net_initdata xfrm4_net_ops = {
+       .init   = xfrm4_net_init,
+       .exit   = xfrm4_net_exit,
+};
 #endif
-       xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
+
+static void __init xfrm4_policy_init(void)
+{
+       xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 }
 
 void __init xfrm4_init(void)
@@ -286,8 +321,7 @@ void __init xfrm4_init(void)
        xfrm4_state_init();
        xfrm4_policy_init();
 #ifdef CONFIG_SYSCTL
-       sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
-                                        xfrm4_policy_table);
+       register_pernet_subsys(&xfrm4_net_ops);
 #endif
 }
 
index 7f7332b..4dc0d44 100644 (file)
@@ -244,6 +244,9 @@ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
 const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
 const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
 const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
 
 /* Check if a valid qdisc is available */
 static inline bool addrconf_qdisc_ok(const struct net_device *dev)
@@ -428,6 +431,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        /* protected by rtnl_lock */
        rcu_assign_pointer(dev->ip6_ptr, ndev);
 
+       /* Join interface-local all-node multicast group */
+       ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
+
        /* Join all-node multicast group */
        ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
 
@@ -611,10 +617,15 @@ static void dev_forward_change(struct inet6_dev *idev)
        if (idev->cnf.forwarding)
                dev_disable_lro(dev);
        if (dev->flags & IFF_MULTICAST) {
-               if (idev->cnf.forwarding)
+               if (idev->cnf.forwarding) {
                        ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
-               else
+                       ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
+                       ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
+               } else {
                        ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
+                       ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
+                       ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
+               }
        }
 
        list_for_each_entry(ifa, &idev->addr_list, if_list) {
@@ -1656,6 +1667,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
        if (dev->addr_len != IEEE802154_ADDR_LEN)
                return -1;
        memcpy(eui, dev->dev_addr, 8);
+       eui[0] ^= 2;
        return 0;
 }
 
@@ -3313,14 +3325,14 @@ static const struct file_operations if6_fops = {
 
 static int __net_init if6_proc_net_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
+       if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
                return -ENOMEM;
        return 0;
 }
 
 static void __net_exit if6_proc_net_exit(struct net *net)
 {
-       proc_net_remove(net, "if_inet6");
+       remove_proc_entry("if_inet6", net->proc_net);
 }
 
 static struct pernet_operations if6_proc_net_ops = {
index 3842331..bb02e17 100644 (file)
@@ -521,8 +521,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 
        /* We are going to _remove_ AH header to keep sockets happy,
         * so... Later this can change. */
-       if (skb_cloned(skb) &&
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                goto out;
 
        skb->ip_summed = CHECKSUM_NONE;
index 921b8b3..5a80f15 100644 (file)
@@ -509,7 +509,7 @@ static const struct file_operations ac6_seq_fops = {
 
 int __net_init ac6_proc_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
+       if (!proc_create("anycast6", S_IRUGO, net->proc_net, &ac6_seq_fops))
                return -ENOMEM;
 
        return 0;
@@ -517,7 +517,7 @@ int __net_init ac6_proc_init(struct net *net)
 
 void ac6_proc_exit(struct net *net)
 {
-       proc_net_remove(net, "anycast6");
+       remove_proc_entry("anycast6", net->proc_net);
 }
 #endif
 
index 33be363..f5a5478 100644 (file)
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
-                               datagram_recv_ctl(sk, msg, skb);
+                               ip6_datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin->sin6_scope_id = IP6CB(skb)->iif;
                } else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                         struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = IP6CB(skb);
@@ -598,11 +599,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
-                     struct msghdr *msg, struct flowi6 *fl6,
-                     struct ipv6_txoptions *opt,
-                     int *hlimit, int *tclass, int *dontfrag)
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+                         struct msghdr *msg, struct flowi6 *fl6,
+                         struct ipv6_txoptions *opt,
+                         int *hlimit, int *tclass, int *dontfrag)
 {
        struct in6_pktinfo *src_info;
        struct cmsghdr *cmsg;
@@ -872,4 +874,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
        return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
index 22494af..b973ed3 100644 (file)
@@ -65,13 +65,13 @@ static DEFINE_SPINLOCK(ip6_fl_lock);
 static DEFINE_SPINLOCK(ip6_sk_fl_lock);
 
 #define for_each_fl_rcu(hash, fl)                              \
-       for (fl = rcu_dereference(fl_ht[(hash)]);               \
+       for (fl = rcu_dereference_bh(fl_ht[(hash)]);            \
             fl != NULL;                                        \
-            fl = rcu_dereference(fl->next))
+            fl = rcu_dereference_bh(fl->next))
 #define for_each_fl_continue_rcu(fl)                           \
-       for (fl = rcu_dereference(fl->next);                    \
+       for (fl = rcu_dereference_bh(fl->next);                 \
             fl != NULL;                                        \
-            fl = rcu_dereference(fl->next))
+            fl = rcu_dereference_bh(fl->next))
 
 #define for_each_sk_fl_rcu(np, sfl)                            \
        for (sfl = rcu_dereference_bh(np->ipv6_fl_list);        \
@@ -390,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                msg.msg_control = (void*)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-                                       &junk, &junk);
+               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+                                           &junk, &junk, &junk);
                if (err)
                        goto done;
                err = -EINVAL;
@@ -806,15 +806,15 @@ static const struct file_operations ip6fl_seq_fops = {
 
 static int __net_init ip6_flowlabel_proc_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "ip6_flowlabel",
-                                 S_IRUGO, &ip6fl_seq_fops))
+       if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
+                        &ip6fl_seq_fops))
                return -ENOMEM;
        return 0;
 }
 
 static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
 {
-       proc_net_remove(net, "ip6_flowlabel");
+       remove_proc_entry("ip6_flowlabel", net->proc_net);
 }
 #else
 static inline int ip6_flowlabel_proc_init(struct net *net)
index db91fe3..e4efffe 100644 (file)
@@ -958,7 +958,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
        int ret;
 
        if (!ip6_tnl_xmit_ctl(t))
-               return -1;
+               goto tx_err;
 
        switch (skb->protocol) {
        case htons(ETH_P_IP):
index 4ac5bf3..5b10414 100644 (file)
@@ -118,6 +118,15 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
            ipv6_addr_loopback(&hdr->daddr))
                goto err;
 
+       /* RFC4291 2.7
+        * Nodes must not originate a packet to a multicast address whose scope
+        * field contains the reserved value 0; if such a packet is received, it
+        * must be silently dropped.
+        */
+       if (ipv6_addr_is_multicast(&hdr->daddr) &&
+           IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
+               goto err;
+
        /*
         * RFC4291 2.7
         * Multicast addresses must not be used as source addresses in IPv6
index d141fc3..8234c1d 100644 (file)
@@ -99,8 +99,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                     ~(SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
+                      SKB_GSO_GRE |
                       SKB_GSO_TCPV6 |
-                      SKB_GSO_SHARED_FRAG |
                       0)))
                goto out;
 
index 906b7e6..155eccf 100644 (file)
@@ -120,6 +120,13 @@ static int ip6_finish_output2(struct sk_buff *skb)
 
                IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
                                skb->len);
+
+               if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
+                   IPV6_ADDR_SCOPE_NODELOCAL &&
+                   !(dev->flags & IFF_LOOPBACK)) {
+                       kfree_skb(skb);
+                       return 0;
+               }
        }
 
        rcu_read_lock_bh();
@@ -242,9 +249,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                               dst->dev, dst_output);
        }
 
-       net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
        skb->dev = dst->dev;
-       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+       ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
index 351ce98..96bfb4e 100644 (file)
@@ -1359,9 +1359,9 @@ static int __net_init ip6mr_net_init(struct net *net)
 
 #ifdef CONFIG_PROC_FS
        err = -ENOMEM;
-       if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
+       if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
                goto proc_vif_fail;
-       if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
+       if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
                goto proc_cache_fail;
 #endif
 
@@ -1369,7 +1369,7 @@ static int __net_init ip6mr_net_init(struct net *net)
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
-       proc_net_remove(net, "ip6_mr_vif");
+       remove_proc_entry("ip6_mr_vif", net->proc_net);
 proc_vif_fail:
        ip6mr_rules_exit(net);
 #endif
@@ -1380,8 +1380,8 @@ fail:
 static void __net_exit ip6mr_net_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ip6_mr_cache");
-       proc_net_remove(net, "ip6_mr_vif");
+       remove_proc_entry("ip6_mr_cache", net->proc_net);
+       remove_proc_entry("ip6_mr_vif", net->proc_net);
 #endif
        ip6mr_rules_exit(net);
 }
index ee94d31..d1e2e8e 100644 (file)
@@ -476,8 +476,8 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
-                                        &junk);
+               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+                                            &junk, &junk);
                if (retv)
                        goto done;
 update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                release_sock(sk);
 
                if (skb) {
-                       int err = datagram_recv_ctl(sk, &msg, skb);
+                       int err = ip6_datagram_recv_ctl(sk, &msg, skb);
                        kfree_skb(skb);
                        if (err)
                                return err;
index e5de485..bfa6cc3 100644 (file)
@@ -661,6 +661,10 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];
 
+       if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
+           IPV6_ADDR_SCOPE_LINKLOCAL)
+               return;
+
        spin_lock_bh(&mc->mca_lock);
        if (!(mc->mca_flags&MAF_LOADED)) {
                mc->mca_flags |= MAF_LOADED;
@@ -687,6 +691,10 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];
 
+       if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
+           IPV6_ADDR_SCOPE_LINKLOCAL)
+               return;
+
        spin_lock_bh(&mc->mca_lock);
        if (mc->mca_flags&MAF_LOADED) {
                mc->mca_flags &= ~MAF_LOADED;
@@ -2591,10 +2599,10 @@ static int __net_init igmp6_proc_init(struct net *net)
        int err;
 
        err = -ENOMEM;
-       if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops))
+       if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
                goto out;
-       if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO,
-                                 &igmp6_mcf_seq_fops))
+       if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
+                        &igmp6_mcf_seq_fops))
                goto out_proc_net_igmp6;
 
        err = 0;
@@ -2602,14 +2610,14 @@ out:
        return err;
 
 out_proc_net_igmp6:
-       proc_net_remove(net, "igmp6");
+       remove_proc_entry("igmp6", net->proc_net);
        goto out;
 }
 
 static void __net_exit igmp6_proc_exit(struct net *net)
 {
-       proc_net_remove(net, "mcfilter6");
-       proc_net_remove(net, "igmp6");
+       remove_proc_entry("mcfilter6", net->proc_net);
+       remove_proc_entry("igmp6", net->proc_net);
 }
 #else
 static inline int igmp6_proc_init(struct net *net)
index 7302b0b..83acc14 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/ipv6.h>
+#include <net/ipv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_ipv6/ip6t_NPT.h>
@@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
 {
        struct ip6t_npt_tginfo *npt = par->targinfo;
        __wsum src_sum = 0, dst_sum = 0;
+       struct in6_addr pfx;
        unsigned int i;
 
        if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
                return -EINVAL;
 
+       /* Ensure that LSB of prefix is zero */
+       ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len);
+       if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6))
+               return -EINVAL;
+       ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len);
+       if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
                src_sum = csum_add(src_sum,
                                (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
@@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
                                (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
        }
 
-       npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum);
+       npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
        return 0;
 }
 
@@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
 
                idx = i / 32;
                addr->s6_addr32[idx] &= mask;
-               addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx];
+               addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];
        }
 
        if (pfx_len <= 48)
@@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
                        return false;
        }
 
-       sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx],
-                        npt->adjustment);
+       sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]),
+                                 csum_unfold(npt->adjustment)));
        if (sum == CSUM_MANGLED_0)
                sum = 0;
        *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
index c674f15..54087e9 100644 (file)
@@ -97,9 +97,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
                if (table == NULL)
                        goto err_alloc;
 
-               table[0].data = &net->ipv6.frags.high_thresh;
-               table[1].data = &net->ipv6.frags.low_thresh;
-               table[2].data = &net->ipv6.frags.timeout;
+               table[0].data = &net->nf_frag.frags.timeout;
+               table[1].data = &net->nf_frag.frags.low_thresh;
+               table[2].data = &net->nf_frag.frags.high_thresh;
        }
 
        hdr = register_net_sysctl(net, "net/netfilter", table);
@@ -368,7 +368,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
        }
 
        /* Head of list must not be cloned. */
-       if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
+       if (skb_unclone(head, GFP_ATOMIC)) {
                pr_debug("skb is cloned but can't expand head");
                goto out_oom;
        }
index 745a320..bbbe53a 100644 (file)
@@ -295,11 +295,11 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
 
 static int __net_init ipv6_proc_init_net(struct net *net)
 {
-       if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
-                       &sockstat6_seq_fops))
+       if (!proc_create("sockstat6", S_IRUGO, net->proc_net,
+                        &sockstat6_seq_fops))
                return -ENOMEM;
 
-       if (!proc_net_fops_create(net, "snmp6", S_IRUGO, &snmp6_seq_fops))
+       if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
                goto proc_snmp6_fail;
 
        net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
@@ -308,17 +308,17 @@ static int __net_init ipv6_proc_init_net(struct net *net)
        return 0;
 
 proc_dev_snmp6_fail:
-       proc_net_remove(net, "snmp6");
+       remove_proc_entry("snmp6", net->proc_net);
 proc_snmp6_fail:
-       proc_net_remove(net, "sockstat6");
+       remove_proc_entry("sockstat6", net->proc_net);
        return -ENOMEM;
 }
 
 static void __net_exit ipv6_proc_exit_net(struct net *net)
 {
-       proc_net_remove(net, "sockstat6");
-       proc_net_remove(net, "dev_snmp6");
-       proc_net_remove(net, "snmp6");
+       remove_proc_entry("sockstat6", net->proc_net);
+       remove_proc_entry("dev_snmp6", net->proc_net);
+       remove_proc_entry("snmp6", net->proc_net);
 }
 
 static struct pernet_operations ipv6_proc_ops = {
index 6cd29b1..c65907d 100644 (file)
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (np->rxopt.all)
-               datagram_recv_ctl(sk, msg, skb);
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -1292,7 +1292,7 @@ static const struct file_operations raw6_seq_fops = {
 
 static int __net_init raw6_init_net(struct net *net)
 {
-       if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
+       if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
                return -ENOMEM;
 
        return 0;
@@ -1300,7 +1300,7 @@ static int __net_init raw6_init_net(struct net *net)
 
 static void __net_exit raw6_exit_net(struct net *net)
 {
-       proc_net_remove(net, "raw6");
+       remove_proc_entry("raw6", net->proc_net);
 }
 
 static struct pernet_operations raw6_net_ops = {
index bab2c27..3c6a772 100644 (file)
@@ -79,20 +79,8 @@ unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
 {
        u32 c;
 
-       c = jhash_3words((__force u32)saddr->s6_addr32[0],
-                        (__force u32)saddr->s6_addr32[1],
-                        (__force u32)saddr->s6_addr32[2],
-                        rnd);
-
-       c = jhash_3words((__force u32)saddr->s6_addr32[3],
-                        (__force u32)daddr->s6_addr32[0],
-                        (__force u32)daddr->s6_addr32[1],
-                        c);
-
-       c =  jhash_3words((__force u32)daddr->s6_addr32[2],
-                         (__force u32)daddr->s6_addr32[3],
-                         (__force u32)id,
-                         c);
+       c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+                        (__force u32)id, rnd);
 
        return c & (INETFRAGS_HASHSZ - 1);
 }
@@ -404,7 +392,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                goto out_oversize;
 
        /* Head of list must not be cloned. */
-       if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;
 
        /* If the first fragment is fragmented itself, we split
index f3328bc..515bb51 100644 (file)
@@ -2995,8 +2995,8 @@ static void __net_exit ip6_route_net_exit(struct net *net)
 static int __net_init ip6_route_net_init_late(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
-       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+       proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
+       proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
 #endif
        return 0;
 }
@@ -3004,8 +3004,8 @@ static int __net_init ip6_route_net_init_late(struct net *net)
 static void __net_exit ip6_route_net_exit_late(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ipv6_route");
-       proc_net_remove(net, "rt6_stats");
+       remove_proc_entry("ipv6_route", net->proc_net);
+       remove_proc_entry("rt6_stats", net->proc_net);
 #endif
 }
 
index 06087e5..9b64600 100644 (file)
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
 
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -712,7 +713,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 #endif
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-                                u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
+                                u32 tsval, u32 tsecr,
+                                struct tcp_md5sig_key *key, int rst, u8 tclass)
 {
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
@@ -724,7 +726,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
        struct dst_entry *dst;
        __be32 *topt;
 
-       if (ts)
+       if (tsecr)
                tot_len += TCPOLEN_TSTAMP_ALIGNED;
 #ifdef CONFIG_TCP_MD5SIG
        if (key)
@@ -754,11 +756,11 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 
        topt = (__be32 *)(t1 + 1);
 
-       if (ts) {
+       if (tsecr) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-               *topt++ = htonl(tcp_time_stamp);
-               *topt++ = htonl(ts);
+               *topt++ = htonl(tsval);
+               *topt++ = htonl(tsecr);
        }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -859,7 +861,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);
 
-       tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
+       tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
 release_sk1:
@@ -870,10 +872,11 @@ release_sk1:
 #endif
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
+static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+                           u32 win, u32 tsval, u32 tsecr,
                            struct tcp_md5sig_key *key, u8 tclass)
 {
-       tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
+       tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -883,6 +886,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
        tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+                       tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass);
 
@@ -892,7 +896,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
-       tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
+       tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+                       req->rcv_wnd, tcp_time_stamp, req->ts_recent,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
 }
 
@@ -959,8 +964,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
@@ -1109,6 +1116,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
 }
 
index cb5bf49..599e1ba 100644 (file)
@@ -467,7 +467,7 @@ try_again:
                        ip_cmsg_recv(msg, skb);
        } else {
                if (np->rxopt.all)
-                       datagram_recv_ctl(sk, msg, skb);
+                       ip6_datagram_recv_ctl(sk, msg, skb);
        }
 
        err = copied;
@@ -1143,8 +1143,8 @@ do_udp_sendmsg:
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 0c8934a..cf05cf0 100644 (file)
@@ -56,7 +56,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;
 
-               if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
+               if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
+                                     SKB_GSO_GRE) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
 
index 9f2095b..9bf6a74 100644 (file)
@@ -69,8 +69,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
 
-       if (skb_cloned(skb) &&
-           (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+       err = skb_unclone(skb, GFP_ATOMIC);
+       if (err)
                goto out;
 
        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
index 1282737..4ef7bdb 100644 (file)
@@ -320,7 +320,51 @@ static struct ctl_table xfrm6_policy_table[] = {
        { }
 };
 
-static struct ctl_table_header *sysctl_hdr;
+static int __net_init xfrm6_net_init(struct net *net)
+{
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+
+       table = xfrm6_policy_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
+               if (!table)
+                       goto err_alloc;
+
+               table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
+       }
+
+       hdr = register_net_sysctl(net, "net/ipv6", table);
+       if (!hdr)
+               goto err_reg;
+
+       net->ipv6.sysctl.xfrm6_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
+}
+
+static void __net_exit xfrm6_net_exit(struct net *net)
+{
+       struct ctl_table *table;
+
+       if (net->ipv6.sysctl.xfrm6_hdr == NULL)
+               return;
+
+       table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->ipv6.sysctl.xfrm6_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+}
+
+static struct pernet_operations xfrm6_net_ops = {
+       .init   = xfrm6_net_init,
+       .exit   = xfrm6_net_exit,
+};
 #endif
 
 int __init xfrm6_init(void)
@@ -339,8 +383,7 @@ int __init xfrm6_init(void)
                goto out_policy;
 
 #ifdef CONFIG_SYSCTL
-       sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
-                                        xfrm6_policy_table);
+       register_pernet_subsys(&xfrm6_net_ops);
 #endif
 out:
        return ret;
@@ -352,8 +395,7 @@ out_policy:
 void xfrm6_fini(void)
 {
 #ifdef CONFIG_SYSCTL
-       if (sysctl_hdr)
-               unregister_net_sysctl_table(sysctl_hdr);
+       unregister_pernet_subsys(&xfrm6_net_ops);
 #endif
        xfrm6_policy_fini();
        xfrm6_state_fini();
index cc2630a..9ef7985 100644 (file)
@@ -203,7 +203,6 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
        }
        if (*skb2 != NULL) {
                if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
-                       skb_orphan(*skb2);
                        skb_set_owner_r(*skb2, sk);
                        skb_queue_tail(&sk->sk_receive_queue, *skb2);
                        sk->sk_data_ready(sk, (*skb2)->len);
@@ -816,18 +815,21 @@ static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
        sa->sadb_sa_auth = 0;
        if (x->aalg) {
                struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
-               sa->sadb_sa_auth = a ? a->desc.sadb_alg_id : 0;
+               sa->sadb_sa_auth = (a && a->pfkey_supported) ?
+                                       a->desc.sadb_alg_id : 0;
        }
        sa->sadb_sa_encrypt = 0;
        BUG_ON(x->ealg && x->calg);
        if (x->ealg) {
                struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0);
-               sa->sadb_sa_encrypt = a ? a->desc.sadb_alg_id : 0;
+               sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
+                                       a->desc.sadb_alg_id : 0;
        }
        /* KAME compatible: sadb_sa_encrypt is overloaded with calg id */
        if (x->calg) {
                struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0);
-               sa->sadb_sa_encrypt = a ? a->desc.sadb_alg_id : 0;
+               sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
+                                       a->desc.sadb_alg_id : 0;
        }
 
        sa->sadb_sa_flags = 0;
@@ -1138,7 +1140,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        if (sa->sadb_sa_auth) {
                int keysize = 0;
                struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
-               if (!a) {
+               if (!a || !a->pfkey_supported) {
                        err = -ENOSYS;
                        goto out;
                }
@@ -1160,7 +1162,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        if (sa->sadb_sa_encrypt) {
                if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) {
                        struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt);
-                       if (!a) {
+                       if (!a || !a->pfkey_supported) {
                                err = -ENOSYS;
                                goto out;
                        }
@@ -1172,7 +1174,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                } else {
                        int keysize = 0;
                        struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt);
-                       if (!a) {
+                       if (!a || !a->pfkey_supported) {
                                err = -ENOSYS;
                                goto out;
                        }
@@ -1578,13 +1580,13 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
        struct sadb_msg *hdr;
        int len, auth_len, enc_len, i;
 
-       auth_len = xfrm_count_auth_supported();
+       auth_len = xfrm_count_pfkey_auth_supported();
        if (auth_len) {
                auth_len *= sizeof(struct sadb_alg);
                auth_len += sizeof(struct sadb_supported);
        }
 
-       enc_len = xfrm_count_enc_supported();
+       enc_len = xfrm_count_pfkey_enc_supported();
        if (enc_len) {
                enc_len *= sizeof(struct sadb_alg);
                enc_len += sizeof(struct sadb_supported);
@@ -1615,6 +1617,8 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
                        struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
                        if (!aalg)
                                break;
+                       if (!aalg->pfkey_supported)
+                               continue;
                        if (aalg->available)
                                *ap++ = aalg->desc;
                }
@@ -1634,6 +1638,8 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
                        struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
                        if (!ealg)
                                break;
+                       if (!ealg->pfkey_supported)
+                               continue;
                        if (ealg->available)
                                *ap++ = ealg->desc;
                }
@@ -2825,6 +2831,8 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
                const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
                if (!aalg)
                        break;
+               if (!aalg->pfkey_supported)
+                       continue;
                if (aalg_tmpl_set(t, aalg) && aalg->available)
                        sz += sizeof(struct sadb_comb);
        }
@@ -2840,6 +2848,9 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                if (!ealg)
                        break;
 
+               if (!ealg->pfkey_supported)
+                       continue;
+
                if (!(ealg_tmpl_set(t, ealg) && ealg->available))
                        continue;
 
@@ -2848,6 +2859,9 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                        if (!aalg)
                                break;
 
+                       if (!aalg->pfkey_supported)
+                               continue;
+
                        if (aalg_tmpl_set(t, aalg) && aalg->available)
                                sz += sizeof(struct sadb_comb);
                }
@@ -2871,6 +2885,9 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
                if (!aalg)
                        break;
 
+               if (!aalg->pfkey_supported)
+                       continue;
+
                if (aalg_tmpl_set(t, aalg) && aalg->available) {
                        struct sadb_comb *c;
                        c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
@@ -2903,6 +2920,9 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
                if (!ealg)
                        break;
 
+               if (!ealg->pfkey_supported)
+                       continue;
+
                if (!(ealg_tmpl_set(t, ealg) && ealg->available))
                        continue;
 
@@ -2911,6 +2931,8 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
                        const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
                        if (!aalg)
                                break;
+                       if (!aalg->pfkey_supported)
+                               continue;
                        if (!(aalg_tmpl_set(t, aalg) && aalg->available))
                                continue;
                        c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
@@ -3718,7 +3740,7 @@ static int __net_init pfkey_init_proc(struct net *net)
 {
        struct proc_dir_entry *e;
 
-       e = proc_net_fops_create(net, "pfkey", 0, &pfkey_proc_ops);
+       e = proc_create("pfkey", 0, net->proc_net, &pfkey_proc_ops);
        if (e == NULL)
                return -ENOMEM;
 
@@ -3727,7 +3749,7 @@ static int __net_init pfkey_init_proc(struct net *net)
 
 static void __net_exit pfkey_exit_proc(struct net *net)
 {
-       proc_net_remove(net, "pfkey");
+       remove_proc_entry("pfkey", net->proc_net);
 }
 #else
 static inline int pfkey_init_proc(struct net *net)
index 1a9f372..dcfd64e 100644 (file)
@@ -101,6 +101,7 @@ struct l2tp_skb_cb {
 
 static atomic_t l2tp_tunnel_count;
 static atomic_t l2tp_session_count;
+static struct workqueue_struct *l2tp_wq;
 
 /* per-net private data for this module */
 static unsigned int l2tp_net_id;
@@ -122,7 +123,6 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
        return net_generic(net, l2tp_net_id);
 }
 
-
 /* Tunnel reference counts. Incremented per session that is added to
  * the tunnel.
  */
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 
 }
 
+/* Lookup the tunnel socket, possibly involving the fs code if the socket is
+ * owned by userspace.  A struct sock returned from this function must be
+ * released using l2tp_tunnel_sock_put once you're done with it.
+ */
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+{
+       int err = 0;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       if (!tunnel)
+               goto out;
+
+       if (tunnel->fd >= 0) {
+               /* Socket is owned by userspace, who might be in the process
+                * of closing it.  Look the socket up using the fd to ensure
+                * consistency.
+                */
+               sock = sockfd_lookup(tunnel->fd, &err);
+               if (sock)
+                       sk = sock->sk;
+       } else {
+               /* Socket is owned by kernelspace */
+               sk = tunnel->sock;
+       }
+
+out:
+       return sk;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
+
+/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
+void l2tp_tunnel_sock_put(struct sock *sk)
+{
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       if (tunnel) {
+               if (tunnel->fd >= 0) {
+                       /* Socket is owned by userspace */
+                       sockfd_put(sk->sk_socket);
+               }
+               sock_put(sk);
+       }
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
+
 /* Lookup a session by id in the global session list
  */
 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
        struct udphdr *uh;
        struct inet_sock *inet;
        __wsum csum;
-       int old_headroom;
-       int new_headroom;
        int headroom;
        int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
        int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
         */
        headroom = NET_SKB_PAD + sizeof(struct iphdr) +
                uhlen + hdr_len;
-       old_headroom = skb_headroom(skb);
        if (skb_cow_head(skb, headroom)) {
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }
 
-       new_headroom = skb_headroom(skb);
        skb_orphan(skb);
-       skb->truesize += new_headroom - old_headroom;
-
        /* Setup L2TP header */
        session->build_header(session, __skb_push(skb, hdr_len));
 
@@ -1232,6 +1271,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
        struct l2tp_tunnel *tunnel;
+       struct l2tp_net *pn;
 
        tunnel = sk->sk_user_data;
        if (tunnel == NULL)
@@ -1239,9 +1279,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
 
        l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
 
-       /* Close all sessions */
-       l2tp_tunnel_closeall(tunnel);
 
+       /* Disable udp encapsulation */
        switch (tunnel->encap) {
        case L2TP_ENCAPTYPE_UDP:
                /* No longer an encapsulation socket. See net/ipv4/udp.c */
@@ -1253,17 +1292,23 @@ static void l2tp_tunnel_destruct(struct sock *sk)
        }
 
        /* Remove hooks into tunnel socket */
-       tunnel->sock = NULL;
        sk->sk_destruct = tunnel->old_sk_destruct;
        sk->sk_user_data = NULL;
+       tunnel->sock = NULL;
 
-       /* Call the original destructor */
-       if (sk->sk_destruct)
-               (*sk->sk_destruct)(sk);
+       /* Remove the tunnel struct from the tunnel list */
+       pn = l2tp_pernet(tunnel->l2tp_net);
+       spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+       list_del_rcu(&tunnel->list);
+       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+       atomic_dec(&l2tp_tunnel_count);
 
-       /* We're finished with the socket */
+       l2tp_tunnel_closeall(tunnel);
        l2tp_tunnel_dec_refcount(tunnel);
 
+       /* Call the original destructor */
+       if (sk->sk_destruct)
+               (*sk->sk_destruct)(sk);
 end:
        return;
 }
@@ -1337,48 +1382,77 @@ again:
  */
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 {
-       struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
        BUG_ON(atomic_read(&tunnel->ref_count) != 0);
        BUG_ON(tunnel->sock != NULL);
-
        l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
-
-       /* Remove from tunnel list */
-       spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-       list_del_rcu(&tunnel->list);
        kfree_rcu(tunnel, rcu);
-       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+}
 
-       atomic_dec(&l2tp_tunnel_count);
+/* Workqueue tunnel deletion function */
+static void l2tp_tunnel_del_work(struct work_struct *work)
+{
+       struct l2tp_tunnel *tunnel = NULL;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       tunnel = container_of(work, struct l2tp_tunnel, del_work);
+       sk = l2tp_tunnel_sock_lookup(tunnel);
+       if (!sk)
+               return;
+
+       sock = sk->sk_socket;
+       BUG_ON(!sock);
+
+       /* If the tunnel socket was created directly by the kernel, use the
+        * sk_* API to release the socket now.  Otherwise go through the
+        * inet_* layer to shut the socket down, and let userspace close it.
+        * In either case the tunnel resources are freed in the socket
+        * destructor when the tunnel socket goes away.
+        */
+       if (sock->file == NULL) {
+               kernel_sock_shutdown(sock, SHUT_RDWR);
+               sk_release_kernel(sk);
+       } else {
+               inet_shutdown(sock, 2);
+       }
+
+       l2tp_tunnel_sock_put(sk);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
  * userspace. This is used for static tunnels where there is no
  * managing L2TP daemon.
+ *
+ * Since we don't want these sockets to keep a namespace alive by
+ * themselves, we drop the socket's namespace refcount after creation.
+ * These sockets are freed when the namespace exits using the pernet
+ * exit hook.
  */
-static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
+static int l2tp_tunnel_sock_create(struct net *net,
+                               u32 tunnel_id,
+                               u32 peer_tunnel_id,
+                               struct l2tp_tunnel_cfg *cfg,
+                               struct socket **sockp)
 {
        int err = -EINVAL;
-       struct sockaddr_in udp_addr;
+       struct socket *sock = NULL;
+       struct sockaddr_in udp_addr = {0};
+       struct sockaddr_l2tpip ip_addr = {0};
 #if IS_ENABLED(CONFIG_IPV6)
-       struct sockaddr_in6 udp6_addr;
-       struct sockaddr_l2tpip6 ip6_addr;
+       struct sockaddr_in6 udp6_addr = {0};
+       struct sockaddr_l2tpip6 ip6_addr = {0};
 #endif
-       struct sockaddr_l2tpip ip_addr;
-       struct socket *sock = NULL;
 
        switch (cfg->encap) {
        case L2TP_ENCAPTYPE_UDP:
 #if IS_ENABLED(CONFIG_IPV6)
                if (cfg->local_ip6 && cfg->peer_ip6) {
-                       err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
+                       err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
                        if (err < 0)
                                goto out;
 
-                       sock = *sockp;
+                       sk_change_net(sock->sk, net);
 
-                       memset(&udp6_addr, 0, sizeof(udp6_addr));
                        udp6_addr.sin6_family = AF_INET6;
                        memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
                               sizeof(udp6_addr.sin6_addr));
@@ -1400,13 +1474,12 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
                } else
 #endif
                {
-                       err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
+                       err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
                        if (err < 0)
                                goto out;
 
-                       sock = *sockp;
+                       sk_change_net(sock->sk, net);
 
-                       memset(&udp_addr, 0, sizeof(udp_addr));
                        udp_addr.sin_family = AF_INET;
                        udp_addr.sin_addr = cfg->local_ip;
                        udp_addr.sin_port = htons(cfg->local_udp_port);
@@ -1433,14 +1506,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
        case L2TP_ENCAPTYPE_IP:
 #if IS_ENABLED(CONFIG_IPV6)
                if (cfg->local_ip6 && cfg->peer_ip6) {
-                       err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
-                                         sockp);
+                       err = sock_create_kern(AF_INET6, SOCK_DGRAM,
+                                         IPPROTO_L2TP, &sock);
                        if (err < 0)
                                goto out;
 
-                       sock = *sockp;
+                       sk_change_net(sock->sk, net);
 
-                       memset(&ip6_addr, 0, sizeof(ip6_addr));
                        ip6_addr.l2tp_family = AF_INET6;
                        memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
                               sizeof(ip6_addr.l2tp_addr));
@@ -1462,14 +1534,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
                } else
 #endif
                {
-                       err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
-                                         sockp);
+                       err = sock_create_kern(AF_INET, SOCK_DGRAM,
+                                         IPPROTO_L2TP, &sock);
                        if (err < 0)
                                goto out;
 
-                       sock = *sockp;
+                       sk_change_net(sock->sk, net);
 
-                       memset(&ip_addr, 0, sizeof(ip_addr));
                        ip_addr.l2tp_family = AF_INET;
                        ip_addr.l2tp_addr = cfg->local_ip;
                        ip_addr.l2tp_conn_id = tunnel_id;
@@ -1493,8 +1564,10 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
        }
 
 out:
+       *sockp = sock;
        if ((err < 0) && sock) {
-               sock_release(sock);
+               kernel_sock_shutdown(sock, SHUT_RDWR);
+               sk_release_kernel(sock->sk);
                *sockp = NULL;
        }
 
@@ -1517,15 +1590,23 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
         * kernel socket.
         */
        if (fd < 0) {
-               err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
+               err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
+                               cfg, &sock);
                if (err < 0)
                        goto err;
        } else {
-               err = -EBADF;
                sock = sockfd_lookup(fd, &err);
                if (!sock) {
-                       pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+                       pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
                               tunnel_id, fd, err);
+                       err = -EBADF;
+                       goto err;
+               }
+
+               /* Reject namespace mismatches */
+               if (!net_eq(sock_net(sock->sk), net)) {
+                       pr_err("tunl %u: netns mismatch\n", tunnel_id);
+                       err = -EINVAL;
                        goto err;
                }
        }
@@ -1607,10 +1688,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
        tunnel->sock = sk;
+       tunnel->fd = fd;
        lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
 
        sk->sk_allocation = GFP_ATOMIC;
 
+       /* Init delete workqueue struct */
+       INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
+
        /* Add tunnel to our list */
        INIT_LIST_HEAD(&tunnel->list);
        atomic_inc(&l2tp_tunnel_count);
@@ -1642,25 +1727,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
-       int err = 0;
-       struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
-
-       /* Force the tunnel socket to close. This will eventually
-        * cause the tunnel to be deleted via the normal socket close
-        * mechanisms when userspace closes the tunnel socket.
-        */
-       if (sock != NULL) {
-               err = inet_shutdown(sock, 2);
-
-               /* If the tunnel's socket was created by the kernel,
-                * close the socket here since the socket was not
-                * created by userspace.
-                */
-               if (sock->file == NULL)
-                       err = inet_release(sock);
-       }
-
-       return err;
+       return (false == queue_work(l2tp_wq, &tunnel->del_work));
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
 
@@ -1844,8 +1911,21 @@ static __net_init int l2tp_init_net(struct net *net)
        return 0;
 }
 
+static __net_exit void l2tp_exit_net(struct net *net)
+{
+       struct l2tp_net *pn = l2tp_pernet(net);
+       struct l2tp_tunnel *tunnel = NULL;
+
+       rcu_read_lock_bh();
+       list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+               (void)l2tp_tunnel_delete(tunnel);
+       }
+       rcu_read_unlock_bh();
+}
+
 static struct pernet_operations l2tp_net_ops = {
        .init = l2tp_init_net,
+       .exit = l2tp_exit_net,
        .id   = &l2tp_net_id,
        .size = sizeof(struct l2tp_net),
 };
@@ -1858,6 +1938,13 @@ static int __init l2tp_init(void)
        if (rc)
                goto out;
 
+       l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
+       if (!l2tp_wq) {
+               pr_err("alloc_workqueue failed\n");
+               rc = -ENOMEM;
+               goto out;
+       }
+
        pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
 
 out:
@@ -1867,6 +1954,10 @@ out:
 static void __exit l2tp_exit(void)
 {
        unregister_pernet_device(&l2tp_net_ops);
+       if (l2tp_wq) {
+               destroy_workqueue(l2tp_wq);
+               l2tp_wq = NULL;
+       }
 }
 
 module_init(l2tp_init);
index 56d583e..8eb8f1d 100644 (file)
@@ -188,7 +188,10 @@ struct l2tp_tunnel {
        int (*recv_payload_hook)(struct sk_buff *skb);
        void (*old_sk_destruct)(struct sock *);
        struct sock             *sock;          /* Parent socket */
-       int                     fd;
+       int                     fd;             /* Parent fd, if tunnel socket
+                                                * was created by userspace */
+
+       struct work_struct      del_work;
 
        uint8_t                 priv[0];        /* private data */
 };
@@ -228,6 +231,8 @@ out:
        return tunnel;
 }
 
+extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+extern void l2tp_tunnel_sock_put(struct sock *sk);
 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
index 61d8b75..f7ac8f4 100644 (file)
@@ -115,6 +115,7 @@ static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, in
  */
 static int l2tp_ip_recv(struct sk_buff *skb)
 {
+       struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
@@ -142,7 +143,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(&init_net, NULL, session_id);
+       session = l2tp_session_find(net, NULL, session_id);
        if (session == NULL)
                goto discard;
 
@@ -173,14 +174,14 @@ pass_up:
                goto discard;
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-       tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+       tunnel = l2tp_tunnel_find(net, tunnel_id);
        if (tunnel != NULL)
                sk = tunnel->sock;
        else {
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
                read_lock_bh(&l2tp_ip_lock);
-               sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
+               sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
                read_unlock_bh(&l2tp_ip_lock);
        }
 
@@ -239,6 +240,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+       struct net *net = sock_net(sk);
        int ret;
        int chk_addr_ret;
 
@@ -251,7 +253,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        ret = -EADDRINUSE;
        read_lock_bh(&l2tp_ip_lock);
-       if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
+       if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+                                 sk->sk_bound_dev_if, addr->l2tp_conn_id))
                goto out_in_use;
 
        read_unlock_bh(&l2tp_ip_lock);
@@ -260,7 +263,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
                goto out;
 
-       chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
+       chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
        ret = -EADDRNOTAVAIL;
        if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
@@ -369,7 +372,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
        return 0;
 
 drop:
-       IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+       IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return -1;
 }
@@ -605,6 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
 
 static struct net_protocol l2tp_ip_protocol __read_mostly = {
        .handler        = l2tp_ip_recv,
+       .netns_ok       = 1,
 };
 
 static int __init l2tp_ip_init(void)
index 9275471..8ee4a86 100644 (file)
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                            struct msghdr *msg, size_t len, int noblock,
                            int flags, int *addr_len)
 {
-       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                        lsa->l2tp_scope_id = IP6CB(skb)->iif;
        }
 
-       if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+       if (np->rxopt.all)
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        if (flags & MSG_TRUNC)
                copied = skb->len;
index bbba3a1..c1bab22 100644 (file)
@@ -37,6 +37,7 @@ static struct genl_family l2tp_nl_family = {
        .version        = L2TP_GENL_VERSION,
        .hdrsize        = 0,
        .maxattr        = L2TP_ATTR_MAX,
+       .netnsok        = true,
 };
 
 /* Accessed under genl lock */
index 286366e..3f4e3af 100644 (file)
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel;
        struct pppol2tp_session *ps;
-       int old_headroom;
-       int new_headroom;
        int uhlen, headroom;
 
        if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (tunnel == NULL)
                goto abort_put_sess;
 
-       old_headroom = skb_headroom(skb);
        uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
        headroom = NET_SKB_PAD +
                   sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (skb_cow_head(skb, headroom))
                goto abort_put_sess_tun;
 
-       new_headroom = skb_headroom(skb);
-       skb->truesize += new_headroom - old_headroom;
-
        /* Setup PPP header */
        __skb_push(skb, sizeof(ppph));
        skb->data[0] = ppph[0];
@@ -1789,7 +1783,8 @@ static __net_init int pppol2tp_init_net(struct net *net)
        struct proc_dir_entry *pde;
        int err = 0;
 
-       pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
+       pde = proc_create("pppol2tp", S_IRUGO, net->proc_net,
+                         &pppol2tp_proc_fops);
        if (!pde) {
                err = -ENOMEM;
                goto out;
@@ -1801,7 +1796,7 @@ out:
 
 static __net_exit void pppol2tp_exit_net(struct net *net)
 {
-       proc_net_remove(net, "pppol2tp");
+       remove_proc_entry("pppol2tp", net->proc_net);
 }
 
 static struct pernet_operations pppol2tp_net_ops = {
index b4ecf26..0ecf947 100644 (file)
@@ -258,6 +258,17 @@ config MAC80211_MESH_SYNC_DEBUG
 
          Do not select this option.
 
+config MAC80211_MESH_PS_DEBUG
+       bool "Verbose mesh powersave debugging"
+       depends on MAC80211_DEBUG_MENU
+       depends on MAC80211_MESH
+       ---help---
+         Selecting this option causes mac80211 to print out very verbose mesh
+         powersave debugging messages (when mac80211 is taking part in a
+         mesh network).
+
+         Do not select this option.
+
 config MAC80211_TDLS_DEBUG
        bool "Verbose TDLS debugging"
        depends on MAC80211_DEBUG_MENU
index 4911202..9d7d840 100644 (file)
@@ -39,7 +39,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
        mesh_pathtbl.o \
        mesh_plink.o \
        mesh_hwmp.o \
-       mesh_sync.o
+       mesh_sync.o \
+       mesh_ps.o
 
 mac80211-$(CONFIG_PM) += pm.o
 
index 808338a..31bf258 100644 (file)
@@ -83,8 +83,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
        if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
                             &sta->sta, tid, NULL, 0))
                sdata_info(sta->sdata,
-                          "HW problem - can not stop rx aggregation for tid %d\n",
-                          tid);
+                          "HW problem - can not stop rx aggregation for %pM tid %d\n",
+                          sta->sta.addr, tid);
 
        /* check if this is a self generated aggregation halt */
        if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -159,7 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
        }
        rcu_read_unlock();
 
-       ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid);
+       ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
+              sta->sta.addr, (u16)*ptid);
 
        set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
        ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
@@ -247,7 +248,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
        status = WLAN_STATUS_REQUEST_DECLINED;
 
        if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-               ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n");
+               ht_dbg(sta->sdata,
+                      "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
+                      sta->sta.addr, tid);
                goto end_no_lock;
        }
 
@@ -317,7 +320,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 
        ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
                               &sta->sta, tid, &start_seq_num, 0);
-       ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret);
+       ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
+              sta->sta.addr, tid, ret);
        if (ret) {
                kfree(tid_agg_rx->reorder_buf);
                kfree(tid_agg_rx->reorder_time);
index 2f0ccbc..13b7683 100644 (file)
@@ -296,7 +296,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
                                       &sta->sta, tid, NULL, 0);
                WARN_ON_ONCE(ret);
-               goto remove_tid_tx;
+               return 0;
        }
 
        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
@@ -354,12 +354,15 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                 */
        }
 
-       if (reason == AGG_STOP_DESTROY_STA) {
- remove_tid_tx:
-               spin_lock_bh(&sta->lock);
-               ieee80211_remove_tid_tx(sta, tid);
-               spin_unlock_bh(&sta->lock);
-       }
+       /*
+        * In the case of AGG_STOP_DESTROY_STA, the driver won't
+        * necessarily call ieee80211_stop_tx_ba_cb(), so this may
+        * seem like we can leave the tid_tx data pending forever.
+        * This is true, in a way, but "forever" is only until the
+        * station struct is actually destroyed. In the meantime,
+        * leaving it around ensures that we don't transmit packets
+        * to the driver on this TID which might confuse it.
+        */
 
        return 0;
 }
@@ -387,12 +390,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
            test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
                rcu_read_unlock();
                ht_dbg(sta->sdata,
-                      "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
-                      tid);
+                      "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
+                      sta->sta.addr, tid);
                return;
        }
 
-       ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
+       ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
+              sta->sta.addr, tid);
 
        ieee80211_stop_tx_ba_session(&sta->sta, tid);
        rcu_read_unlock();
@@ -429,7 +433,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                               &sta->sta, tid, &start_seq_num, 0);
        if (ret) {
                ht_dbg(sdata,
-                      "BA request denied - HW unavailable for tid %d\n", tid);
+                      "BA request denied - HW unavailable for %pM tid %d\n",
+                      sta->sta.addr, tid);
                spin_lock_bh(&sta->lock);
                ieee80211_agg_splice_packets(sdata, tid_tx, tid);
                ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -442,7 +447,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
        /* activate the timer for the recipient's addBA response */
        mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-       ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
+       ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
+              sta->sta.addr, tid);
 
        spin_lock_bh(&sta->lock);
        sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -489,7 +495,8 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
 
        rcu_read_unlock();
 
-       ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
+       ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
+              sta->sta.addr, (u16)*ptid);
 
        ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
 }
@@ -525,7 +532,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 
        if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
                ht_dbg(sdata,
-                      "BA sessions blocked - Denying BA session request\n");
+                      "BA sessions blocked - Denying BA session request %pM tid %d\n",
+                      sta->sta.addr, tid);
                return -EINVAL;
        }
 
@@ -566,8 +574,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
            time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
                        HT_AGG_RETRIES_PERIOD)) {
                ht_dbg(sdata,
-                      "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
-                      sta->ampdu_mlme.addba_req_num[tid], tid);
+                      "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
+                      sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
                ret = -EBUSY;
                goto err_unlock_sta;
        }
@@ -576,8 +584,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
        /* check if the TID is not in aggregation flow already */
        if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
                ht_dbg(sdata,
-                      "BA request denied - session is not idle on tid %u\n",
-                      tid);
+                      "BA request denied - session is not idle on %pM tid %u\n",
+                      sta->sta.addr, tid);
                ret = -EAGAIN;
                goto err_unlock_sta;
        }
@@ -632,7 +640,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-       ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
+       ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
+              sta->sta.addr, tid);
 
        drv_ampdu_action(local, sta->sdata,
                         IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -802,7 +811,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
        if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-               ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
+               ht_dbg(sdata,
+                      "unexpected callback to A-MPDU stop for %pM tid %d\n",
+                      sta->sta.addr, tid);
                goto unlock_sta;
        }
 
@@ -861,13 +872,15 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                goto out;
 
        if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-               ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
+               ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
+                      sta->sta.addr, tid);
                goto out;
        }
 
        del_timer_sync(&tid_tx->addba_resp_timer);
 
-       ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
+       ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
+              sta->sta.addr, tid);
 
        /*
         * addba_resp_timer may have fired before we got here, and
@@ -877,8 +890,8 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
        if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
            test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                ht_dbg(sta->sdata,
-                      "got addBA resp for tid %d but we already gave up\n",
-                      tid);
+                      "got addBA resp for %pM tid %d but we already gave up\n",
+                      sta->sta.addr, tid);
                goto out;
        }
 
index 661b878..179dcbd 100644 (file)
@@ -492,7 +492,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 #ifdef CONFIG_MAC80211_MESH
                sinfo->filled |= STATION_INFO_LLID |
                                 STATION_INFO_PLID |
-                                STATION_INFO_PLINK_STATE;
+                                STATION_INFO_PLINK_STATE |
+                                STATION_INFO_LOCAL_PM |
+                                STATION_INFO_PEER_PM |
+                                STATION_INFO_NONPEER_PM;
 
                sinfo->llid = le16_to_cpu(sta->llid);
                sinfo->plid = le16_to_cpu(sta->plid);
@@ -501,6 +504,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        sinfo->filled |= STATION_INFO_T_OFFSET;
                        sinfo->t_offset = sta->t_offset;
                }
+               sinfo->local_pm = sta->local_pm;
+               sinfo->peer_pm = sta->peer_pm;
+               sinfo->nonpeer_pm = sta->nonpeer_pm;
 #endif
        }
 
@@ -922,11 +928,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        /* TODO: make hostapd tell us what it wants */
        sdata->smps_mode = IEEE80211_SMPS_OFF;
        sdata->needed_rx_chains = sdata->local->rx_chains;
+       sdata->radar_required = params->radar_required;
 
        err = ieee80211_vif_use_channel(sdata, &params->chandef,
                                        IEEE80211_CHANCTX_SHARED);
        if (err)
                return err;
+       ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
        /*
         * Apply control port protocol, this allows us to
@@ -1041,6 +1049,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
        skb_queue_purge(&sdata->u.ap.ps.bc_buf);
 
+       ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
        ieee80211_vif_release_channel(sdata);
 
        return 0;
@@ -1243,25 +1252,26 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 
        if (params->ht_capa)
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-                                                 params->ht_capa,
-                                                 &sta->sta.ht_cap);
+                                                 params->ht_capa, sta);
 
        if (params->vht_capa)
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-                                                   params->vht_capa,
-                                                   &sta->sta.vht_cap);
+                                                   params->vht_capa, sta);
 
        if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
+               u32 changed = 0;
                if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) {
-                       u32 changed = 0;
-
                        switch (params->plink_state) {
                        case NL80211_PLINK_ESTAB:
                                if (sta->plink_state != NL80211_PLINK_ESTAB)
                                        changed = mesh_plink_inc_estab_count(
                                                        sdata);
                                sta->plink_state = params->plink_state;
+
+                               ieee80211_mps_sta_status_update(sta);
+                               changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                             sdata->u.mesh.mshcfg.power_mode);
                                break;
                        case NL80211_PLINK_LISTEN:
                        case NL80211_PLINK_BLOCKED:
@@ -1273,22 +1283,31 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                                        changed = mesh_plink_dec_estab_count(
                                                        sdata);
                                sta->plink_state = params->plink_state;
+
+                               ieee80211_mps_sta_status_update(sta);
+                               changed |=
+                                     ieee80211_mps_local_status_update(sdata);
                                break;
                        default:
                                /*  nothing  */
                                break;
                        }
-                       ieee80211_bss_info_change_notify(sdata, changed);
                } else {
                        switch (params->plink_action) {
                        case PLINK_ACTION_OPEN:
-                               mesh_plink_open(sta);
+                               changed |= mesh_plink_open(sta);
                                break;
                        case PLINK_ACTION_BLOCK:
-                               mesh_plink_block(sta);
+                               changed |= mesh_plink_block(sta);
                                break;
                        }
                }
+
+               if (params->local_pm)
+                       changed |=
+                             ieee80211_mps_set_sta_local_pm(sta,
+                                                            params->local_pm);
+               ieee80211_bss_info_change_notify(sdata, changed);
 #endif
        }
 
@@ -1393,9 +1412,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
                return -ENOENT;
        }
 
-       /* in station mode, supported rates are only valid with TDLS */
+       /* in station mode, some updates are only valid with TDLS */
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           params->supported_rates &&
+           (params->supported_rates || params->ht_capa || params->vht_capa ||
+            params->sta_modify_mask ||
+            (params->sta_flags_mask & BIT(NL80211_STA_FLAG_WME))) &&
            !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
                mutex_unlock(&local->sta_mtx);
                return -EINVAL;
@@ -1777,6 +1798,14 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
        if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
                conf->dot11MeshHWMPconfirmationInterval =
                        nconf->dot11MeshHWMPconfirmationInterval;
+       if (_chg_mesh_attr(NL80211_MESHCONF_POWER_MODE, mask)) {
+               conf->power_mode = nconf->power_mode;
+               ieee80211_mps_local_status_update(sdata);
+       }
+       if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask))
+               conf->dot11MeshAwakeWindowDuration =
+                       nconf->dot11MeshAwakeWindowDuration;
+       ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
        return 0;
 }
 
@@ -1802,9 +1831,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
        if (err)
                return err;
 
-       ieee80211_start_mesh(sdata);
-
-       return 0;
+       return ieee80211_start_mesh(sdata);
 }
 
 static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
@@ -2058,7 +2085,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-       memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate));
+       memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+              sizeof(int) * IEEE80211_NUM_BANDS);
 
        return 0;
 }
@@ -2368,7 +2396,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
        INIT_LIST_HEAD(&roc->dependents);
 
        /* if there's one pending or we're scanning, queue this one */
-       if (!list_empty(&local->roc_list) || local->scanning)
+       if (!list_empty(&local->roc_list) ||
+           local->scanning || local->radar_detect_enabled)
                goto out_check_combine;
 
        /* if not HW assist, just queue & schedule work */
@@ -2618,6 +2647,37 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
        return ieee80211_cancel_roc(local, cookie, false);
 }
 
+static int ieee80211_start_radar_detection(struct wiphy *wiphy,
+                                          struct net_device *dev,
+                                          struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       unsigned long timeout;
+       int err;
+
+       if (!list_empty(&local->roc_list) || local->scanning)
+               return -EBUSY;
+
+       /* whatever, but channel contexts should not complain about that one */
+       sdata->smps_mode = IEEE80211_SMPS_OFF;
+       sdata->needed_rx_chains = local->rx_chains;
+       sdata->radar_required = true;
+
+       mutex_lock(&local->iflist_mtx);
+       err = ieee80211_vif_use_channel(sdata, chandef,
+                                       IEEE80211_CHANCTX_SHARED);
+       mutex_unlock(&local->iflist_mtx);
+       if (err)
+               return err;
+
+       timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
+       ieee80211_queue_delayed_work(&sdata->local->hw,
+                                    &sdata->dfs_cac_timer_work, timeout);
+
+       return 0;
+}
+
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                             struct ieee80211_channel *chan, bool offchan,
                             unsigned int wait, const u8 *buf, size_t len,
@@ -2722,7 +2782,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                goto out_unlock;
        }
 
-       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
+                                       IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
        if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
                IEEE80211_SKB_CB(skb)->hw_queue =
                        local->hw.offchannel_tx_hw_queue;
@@ -3322,4 +3383,5 @@ struct cfg80211_ops mac80211_config_ops = {
        .get_et_stats = ieee80211_get_et_stats,
        .get_et_strings = ieee80211_get_et_strings,
        .get_channel = ieee80211_cfg_get_channel,
+       .start_radar_detection = ieee80211_start_radar_detection,
 };
index 1bfe0a8..78c0d90 100644 (file)
@@ -9,7 +9,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
-static void ieee80211_change_chandef(struct ieee80211_local *local,
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
                                     struct ieee80211_chanctx *ctx,
                                     const struct cfg80211_chan_def *chandef)
 {
@@ -49,7 +49,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
                if (!compat)
                        continue;
 
-               ieee80211_change_chandef(local, ctx, compat);
+               ieee80211_change_chanctx(local, ctx, compat);
 
                return ctx;
        }
@@ -91,6 +91,10 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
 
        list_add_rcu(&ctx->list, &local->chanctx_list);
 
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(local);
+       mutex_unlock(&local->mtx);
+
        return ctx;
 }
 
@@ -110,6 +114,10 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
 
        list_del_rcu(&ctx->list);
        kfree_rcu(ctx, rcu_head);
+
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(local);
+       mutex_unlock(&local->mtx);
 }
 
 static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -128,6 +136,11 @@ static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
        ctx->refcount++;
 
        ieee80211_recalc_txpower(sdata);
+       sdata->vif.bss_conf.idle = false;
+
+       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_MONITOR)
+               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
 
        return 0;
 }
@@ -162,7 +175,7 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
        if (WARN_ON_ONCE(!compat))
                return;
 
-       ieee80211_change_chandef(local, ctx, compat);
+       ieee80211_change_chanctx(local, ctx, compat);
 }
 
 static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -175,11 +188,18 @@ static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
        ctx->refcount--;
        rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
 
+       sdata->vif.bss_conf.idle = true;
+
+       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_MONITOR)
+               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+
        drv_unassign_vif_chanctx(local, sdata, ctx);
 
        if (ctx->refcount > 0) {
                ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
                ieee80211_recalc_smps_chanctx(local, ctx);
+               ieee80211_recalc_radar_chanctx(local, ctx);
        }
 }
 
@@ -198,20 +218,42 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP) {
-               struct ieee80211_sub_if_data *vlan;
-
-               /* for the VLAN list */
-               ASSERT_RTNL();
-               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
-                       rcu_assign_pointer(vlan->vif.chanctx_conf, NULL);
-       }
-
        ieee80211_unassign_vif_chanctx(sdata, ctx);
        if (ctx->refcount == 0)
                ieee80211_free_chanctx(local, ctx);
 }
 
+void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
+                                   struct ieee80211_chanctx *chanctx)
+{
+       struct ieee80211_sub_if_data *sdata;
+       bool radar_enabled = false;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (sdata->radar_required) {
+                       radar_enabled = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       if (radar_enabled == chanctx->conf.radar_enabled)
+               return;
+
+       chanctx->conf.radar_enabled = radar_enabled;
+       local->radar_detect_enabled = chanctx->conf.radar_enabled;
+
+       if (!local->use_chanctx) {
+               local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+       }
+
+       drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
+}
+
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx)
 {
@@ -326,16 +368,57 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP) {
-               struct ieee80211_sub_if_data *vlan;
+       ieee80211_recalc_smps_chanctx(local, ctx);
+       ieee80211_recalc_radar_chanctx(local, ctx);
+ out:
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
+}
+
+int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
+                                  const struct cfg80211_chan_def *chandef,
+                                  u32 *changed)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *ctx;
+       int ret;
+
+       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+                                    IEEE80211_CHAN_DISABLED))
+               return -EINVAL;
+
+       mutex_lock(&local->chanctx_mtx);
+       if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
+               ret = 0;
+               goto out;
+       }
+
+       if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
+           sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
+               ret = -EINVAL;
+               goto out;
+       }
 
-               /* for the VLAN list */
-               ASSERT_RTNL();
-               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
-                       rcu_assign_pointer(vlan->vif.chanctx_conf, &ctx->conf);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               ret = -EINVAL;
+               goto out;
        }
 
-       ieee80211_recalc_smps_chanctx(local, ctx);
+       ctx = container_of(conf, struct ieee80211_chanctx, conf);
+       if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       sdata->vif.bss_conf.chandef = *chandef;
+
+       ieee80211_recalc_chanctx_chantype(local, ctx);
+
+       *changed |= BSS_CHANGED_BANDWIDTH;
+       ret = 0;
  out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
@@ -369,6 +452,40 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&local->chanctx_mtx);
 }
 
+void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+                                        bool clear)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *vlan;
+       struct ieee80211_chanctx_conf *conf;
+
+       ASSERT_RTNL();
+
+       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+               return;
+
+       mutex_lock(&local->chanctx_mtx);
+
+       /*
+        * Check that conf exists, even when clearing this function
+        * must be called with the AP's channel context still there
+        * as it would otherwise cause VLANs to have an invalid
+        * channel context pointer for a while, possibly pointing
+        * to a channel context that has already been freed.
+        */
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                               lockdep_is_held(&local->chanctx_mtx));
+       WARN_ON(!conf);
+
+       if (clear)
+               conf = NULL;
+
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
+
+       mutex_unlock(&local->chanctx_mtx);
+}
+
 void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
index 8f383a5..4ccc5ed 100644 (file)
 #define MAC80211_MESH_SYNC_DEBUG 0
 #endif
 
+#ifdef CONFIG_MAC80211_MESH_PS_DEBUG
+#define MAC80211_MESH_PS_DEBUG 1
+#else
+#define MAC80211_MESH_PS_DEBUG 0
+#endif
+
 #ifdef CONFIG_MAC80211_TDLS_DEBUG
 #define MAC80211_TDLS_DEBUG 1
 #else
@@ -151,6 +157,10 @@ do {                                                                       \
        _sdata_dbg(MAC80211_MESH_SYNC_DEBUG,                            \
                   sdata, fmt, ##__VA_ARGS__)
 
+#define mps_dbg(sdata, fmt, ...)                                       \
+       _sdata_dbg(MAC80211_MESH_PS_DEBUG,                              \
+                  sdata, fmt, ##__VA_ARGS__)
+
 #define tdls_dbg(sdata, fmt, ...)                                      \
        _sdata_dbg(MAC80211_TDLS_DEBUG,                                 \
                   sdata, fmt, ##__VA_ARGS__)
index 466f4b4..b0e32d6 100644 (file)
@@ -121,8 +121,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
                sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
                sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
-       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
-               sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
+       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
+               sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
        if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
                sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
        if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
@@ -151,8 +151,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
                sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
        if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
                sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
-       if (local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)
-               sf += snprintf(buf + sf, mxln - sf, "SCAN_WHILE_IDLE\n");
 
        rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
        kfree(buf);
index cbde5cc..059bbb8 100644 (file)
@@ -515,6 +515,9 @@ IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
                  u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
                  u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
+IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC);
+IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
+                 u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC);
 #endif
 
 #define DEBUGFS_ADD_MODE(name, mode) \
@@ -620,6 +623,8 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
        MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
        MESHPARAMS_ADD(dot11MeshHWMProotInterval);
        MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
+       MESHPARAMS_ADD(power_mode);
+       MESHPARAMS_ADD(dot11MeshAwakeWindowDuration);
 #undef MESHPARAMS_ADD
 }
 #endif
index 6fb1168..c7591f7 100644 (file)
@@ -65,7 +65,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
        test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
 
        int res = scnprintf(buf, sizeof(buf),
-                           "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+                           "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                            TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
                            TEST(PS_DRIVER), TEST(AUTHORIZED),
                            TEST(SHORT_PREAMBLE),
@@ -74,7 +74,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
                            TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
                            TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
                            TEST(INSERTED), TEST(RATE_CONTROL),
-                           TEST(TOFFSET_KNOWN));
+                           TEST(TOFFSET_KNOWN), TEST(MPSP_OWNER),
+                           TEST(MPSP_RECIPIENT));
 #undef TEST
        return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
index 0c07f94..ee56d07 100644 (file)
@@ -207,13 +207,16 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
 {
        might_sleep();
 
-       WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
-                               BSS_CHANGED_BEACON_ENABLED) &&
-                    sdata->vif.type != NL80211_IFTYPE_AP &&
-                    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
-                    sdata->vif.type != NL80211_IFTYPE_MESH_POINT);
-       WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE &&
-                    changed & ~BSS_CHANGED_IDLE);
+       if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
+                                   BSS_CHANGED_BEACON_ENABLED) &&
+                        sdata->vif.type != NL80211_IFTYPE_AP &&
+                        sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+                        sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
+               return;
+
+       if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
+                        sdata->vif.type == NL80211_IFTYPE_MONITOR))
+               return;
 
        check_sdata_in_driver(sdata);
 
@@ -528,6 +531,43 @@ static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
                local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
                                               sta, dir);
 }
+
+static inline
+void drv_add_interface_debugfs(struct ieee80211_local *local,
+                              struct ieee80211_sub_if_data *sdata)
+{
+       might_sleep();
+
+       check_sdata_in_driver(sdata);
+
+       if (!local->ops->add_interface_debugfs)
+               return;
+
+       local->ops->add_interface_debugfs(&local->hw, &sdata->vif,
+                                         sdata->debugfs.dir);
+}
+
+static inline
+void drv_remove_interface_debugfs(struct ieee80211_local *local,
+                                 struct ieee80211_sub_if_data *sdata)
+{
+       might_sleep();
+
+       check_sdata_in_driver(sdata);
+
+       if (!local->ops->remove_interface_debugfs)
+               return;
+
+       local->ops->remove_interface_debugfs(&local->hw, &sdata->vif,
+                                            sdata->debugfs.dir);
+}
+#else
+static inline
+void drv_add_interface_debugfs(struct ieee80211_local *local,
+                              struct ieee80211_sub_if_data *sdata) {}
+static inline
+void drv_remove_interface_debugfs(struct ieee80211_local *local,
+                                 struct ieee80211_sub_if_data *sdata) {}
 #endif
 
 static inline __must_check
@@ -569,7 +609,8 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
        check_sdata_in_driver(sdata);
 
        WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
-               sdata->vif.type != NL80211_IFTYPE_ADHOC);
+               (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+                sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
 
        trace_drv_sta_rc_update(local, sdata, sta, changed);
        if (local->ops->sta_rc_update)
@@ -845,11 +886,12 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
 }
 
 static inline void drv_rssi_callback(struct ieee80211_local *local,
+                                    struct ieee80211_sub_if_data *sdata,
                                     const enum ieee80211_rssi_event event)
 {
-       trace_drv_rssi_callback(local, event);
+       trace_drv_rssi_callback(local, sdata, event);
        if (local->ops->rssi_callback)
-               local->ops->rssi_callback(&local->hw, event);
+               local->ops->rssi_callback(&local->hw, &sdata->vif, event);
        trace_drv_return_void(local);
 }
 
@@ -1020,4 +1062,32 @@ static inline void drv_restart_complete(struct ieee80211_local *local)
        trace_drv_return_void(local);
 }
 
+static inline void
+drv_set_default_unicast_key(struct ieee80211_local *local,
+                           struct ieee80211_sub_if_data *sdata,
+                           int key_idx)
+{
+       check_sdata_in_driver(sdata);
+
+       WARN_ON_ONCE(key_idx < -1 || key_idx > 3);
+
+       trace_drv_set_default_unicast_key(local, sdata, key_idx);
+       if (local->ops->set_default_unicast_key)
+               local->ops->set_default_unicast_key(&local->hw, &sdata->vif,
+                                                   key_idx);
+       trace_drv_return_void(local);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
+                                       struct ieee80211_sub_if_data *sdata,
+                                       struct inet6_dev *idev)
+{
+       trace_drv_ipv6_addr_change(local, sdata);
+       if (local->ops->ipv6_addr_change)
+               local->ops->ipv6_addr_change(&local->hw, &sdata->vif, idev);
+       trace_drv_return_void(local);
+}
+#endif
+
 #endif /* __MAC80211_DRIVER_OPS */
index 61ac7c4..0db25d4 100644 (file)
@@ -37,6 +37,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
        int i;
 
+       if (!ht_cap->ht_supported)
+               return;
+
        if (sdata->vif.type != NL80211_IFTYPE_STATION) {
                /* AP interfaces call this code when adding new stations,
                 * so just silently ignore non station interfaces.
@@ -89,22 +92,24 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 }
 
 
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
                                       struct ieee80211_supported_band *sband,
-                                      struct ieee80211_ht_cap *ht_cap_ie,
-                                      struct ieee80211_sta_ht_cap *ht_cap)
+                                      const struct ieee80211_ht_cap *ht_cap_ie,
+                                      struct sta_info *sta)
 {
+       struct ieee80211_sta_ht_cap ht_cap;
        u8 ampdu_info, tx_mcs_set_cap;
        int i, max_tx_streams;
+       bool changed;
+       enum ieee80211_sta_rx_bandwidth bw;
+       enum ieee80211_smps_mode smps_mode;
 
-       BUG_ON(!ht_cap);
-
-       memset(ht_cap, 0, sizeof(*ht_cap));
+       memset(&ht_cap, 0, sizeof(ht_cap));
 
        if (!ht_cap_ie || !sband->ht_cap.ht_supported)
-               return;
+               goto apply;
 
-       ht_cap->ht_supported = true;
+       ht_cap.ht_supported = true;
 
        /*
         * The bits listed in this expression should be
@@ -112,7 +117,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
         * advertises more then we can't use those thus
         * we mask them out.
         */
-       ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) &
+       ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) &
                (sband->ht_cap.cap |
                 ~(IEEE80211_HT_CAP_LDPC_CODING |
                   IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -121,44 +126,30 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
                   IEEE80211_HT_CAP_SGI_40 |
                   IEEE80211_HT_CAP_DSSSCCK40));
 
-       /* Unset 40 MHz if we're not using a 40 MHz channel */
-       switch (sdata->vif.bss_conf.chandef.width) {
-       case NL80211_CHAN_WIDTH_20_NOHT:
-       case NL80211_CHAN_WIDTH_20:
-               ht_cap->cap &= ~IEEE80211_HT_CAP_SGI_40;
-               ht_cap->cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-               break;
-       case NL80211_CHAN_WIDTH_40:
-       case NL80211_CHAN_WIDTH_80:
-       case NL80211_CHAN_WIDTH_80P80:
-       case NL80211_CHAN_WIDTH_160:
-               break;
-       }
-
        /*
         * The STBC bits are asymmetric -- if we don't have
         * TX then mask out the peer's RX and vice versa.
         */
        if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
-               ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC;
+               ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC;
        if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
-               ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
+               ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC;
 
        ampdu_info = ht_cap_ie->ampdu_params_info;
-       ht_cap->ampdu_factor =
+       ht_cap.ampdu_factor =
                ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
-       ht_cap->ampdu_density =
+       ht_cap.ampdu_density =
                (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
 
        /* own MCS TX capabilities */
        tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
 
        /* Copy peer MCS TX capabilities, the driver might need them. */
-       ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params;
+       ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params;
 
        /* can we TX with MCS rates? */
        if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
-               return;
+               goto apply;
 
        /* Counting from 0, therefore +1 */
        if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF)
@@ -176,25 +167,75 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
         * - remainder are multiple spatial streams using unequal modulation
         */
        for (i = 0; i < max_tx_streams; i++)
-               ht_cap->mcs.rx_mask[i] =
+               ht_cap.mcs.rx_mask[i] =
                        sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];
 
        if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
                for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
                     i < IEEE80211_HT_MCS_MASK_LEN; i++)
-                       ht_cap->mcs.rx_mask[i] =
+                       ht_cap.mcs.rx_mask[i] =
                                sband->ht_cap.mcs.rx_mask[i] &
                                        ht_cap_ie->mcs.rx_mask[i];
 
        /* handle MCS rate 32 too */
        if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
-               ht_cap->mcs.rx_mask[32/8] |= 1;
+               ht_cap.mcs.rx_mask[32/8] |= 1;
 
+ apply:
        /*
         * If user has specified capability over-rides, take care
         * of that here.
         */
-       ieee80211_apply_htcap_overrides(sdata, ht_cap);
+       ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+       changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
+
+       memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
+
+       switch (sdata->vif.bss_conf.chandef.width) {
+       default:
+               WARN_ON_ONCE(1);
+               /* fall through */
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+               bw = IEEE80211_STA_RX_BW_20;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+       case NL80211_CHAN_WIDTH_80:
+       case NL80211_CHAN_WIDTH_80P80:
+       case NL80211_CHAN_WIDTH_160:
+               bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+                               IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+               break;
+       }
+
+       if (bw != sta->sta.bandwidth)
+               changed = true;
+       sta->sta.bandwidth = bw;
+
+       sta->cur_max_bandwidth =
+               ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+                               IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+
+       switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
+                       >> IEEE80211_HT_CAP_SM_PS_SHIFT) {
+       case WLAN_HT_CAP_SM_PS_INVALID:
+       case WLAN_HT_CAP_SM_PS_STATIC:
+               smps_mode = IEEE80211_SMPS_STATIC;
+               break;
+       case WLAN_HT_CAP_SM_PS_DYNAMIC:
+               smps_mode = IEEE80211_SMPS_DYNAMIC;
+               break;
+       case WLAN_HT_CAP_SM_PS_DISABLED:
+               smps_mode = IEEE80211_SMPS_OFF;
+               break;
+       }
+
+       if (smps_mode != sta->sta.smps_mode)
+               changed = true;
+       sta->sta.smps_mode = smps_mode;
+
+       return changed;
 }
 
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
@@ -406,6 +447,9 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
        if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
+       if (sdata->u.mgd.driver_smps_mode == smps_mode)
+               return;
+
        sdata->u.mgd.driver_smps_mode = smps_mode;
 
        ieee80211_queue_work(&sdata->local->hw,
index b4b866f..40b71df 100644 (file)
@@ -228,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
                                        mgmt, skb->len, 0, GFP_KERNEL);
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(local->hw.wiphy, bss);
        netif_carrier_on(sdata->dev);
        cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
 }
@@ -242,6 +242,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        u32 basic_rates;
        int i, j;
        u16 beacon_int = cbss->beacon_interval;
+       const struct cfg80211_bss_ies *ies;
+       u64 tsf;
 
        lockdep_assert_held(&sdata->u.ibss.mtx);
 
@@ -265,13 +267,17 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                }
        }
 
+       rcu_read_lock();
+       ies = rcu_dereference(cbss->ies);
+       tsf = ies->tsf;
+       rcu_read_unlock();
+
        __ieee80211_sta_join_ibss(sdata, cbss->bssid,
                                  beacon_int,
                                  cbss->channel,
                                  basic_rates,
                                  cbss->capability,
-                                 cbss->tsf,
-                                 false);
+                                 tsf, false);
 }
 
 static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
@@ -302,7 +308,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
                         "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
                         sdata->vif.addr, addr, sdata->u.ibss.bssid);
                ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0,
-                                   addr, sdata->u.ibss.bssid, NULL, 0, 0);
+                                   addr, sdata->u.ibss.bssid, NULL, 0, 0, 0);
        }
        return sta;
 }
@@ -422,7 +428,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
         * has actually implemented this.
         */
        ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0,
-                           mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0);
+                           mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0);
 }
 
 static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -490,33 +496,26 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                if (sta && elems->ht_operation && elems->ht_cap_elem &&
                    sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
                        /* we both use HT */
-                       struct ieee80211_sta_ht_cap sta_ht_cap_new;
+                       struct ieee80211_ht_cap htcap_ie;
                        struct cfg80211_chan_def chandef;
 
                        ieee80211_ht_oper_to_chandef(channel,
                                                     elems->ht_operation,
                                                     &chandef);
 
-                       ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-                                                         elems->ht_cap_elem,
-                                                         &sta_ht_cap_new);
+                       memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
 
                        /*
                         * fall back to HT20 if we don't use or use
                         * the other extension channel
                         */
-                       if (chandef.width != NL80211_CHAN_WIDTH_40 ||
-                           cfg80211_get_chandef_type(&chandef) !=
+                       if (cfg80211_get_chandef_type(&chandef) !=
                                                sdata->u.ibss.channel_type)
-                               sta_ht_cap_new.cap &=
-                                       ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-
-                       if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new,
-                                  sizeof(sta_ht_cap_new))) {
-                               memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
-                                      sizeof(sta_ht_cap_new));
-                               rates_updated = true;
-                       }
+                               htcap_ie.cap_info &=
+                                       cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+
+                       rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(
+                                               sdata, sband, &htcap_ie, sta);
                }
 
                if (sta && rates_updated) {
@@ -535,8 +534,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 
        cbss = container_of((void *)bss, struct cfg80211_bss, priv);
 
-       /* was just updated in ieee80211_bss_info_update */
-       beacon_timestamp = cbss->tsf;
+       /* same for beacon and probe response */
+       beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
 
        /* check if we need to merge IBSS */
 
@@ -1102,10 +1101,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 
        mutex_unlock(&sdata->u.ibss.mtx);
 
-       mutex_lock(&sdata->local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&sdata->local->mtx);
-
        /*
         * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
         * reserved, but an HT STA shall protect HT transmissions as though
@@ -1159,7 +1154,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
                if (cbss) {
                        cfg80211_unlink_bss(local->hw.wiphy, cbss);
-                       cfg80211_put_bss(cbss);
+                       cfg80211_put_bss(local->hw.wiphy, cbss);
                }
        }
 
@@ -1203,9 +1198,5 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        mutex_unlock(&sdata->u.ibss.mtx);
 
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&local->mtx);
-
        return 0;
 }
index 63f0430..388580a 100644 (file)
@@ -86,23 +86,11 @@ struct ieee80211_fragment_entry {
 
 
 struct ieee80211_bss {
-       /* don't want to look up all the time */
-       size_t ssid_len;
-       u8 ssid[IEEE80211_MAX_SSID_LEN];
-
-       u32 device_ts;
+       u32 device_ts_beacon, device_ts_presp;
 
        bool wmm_used;
        bool uapsd_supported;
 
-       unsigned long last_probe_resp;
-
-#ifdef CONFIG_MAC80211_MESH
-       u8 *mesh_id;
-       size_t mesh_id_len;
-       u8 *mesh_cfg;
-#endif
-
 #define IEEE80211_MAX_SUPP_RATES 32
        u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
        size_t supp_rates_len;
@@ -153,31 +141,6 @@ enum ieee80211_bss_valid_data_flags {
        IEEE80211_BSS_VALID_ERP                 = BIT(3)
 };
 
-static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
-{
-#ifdef CONFIG_MAC80211_MESH
-       return bss->mesh_cfg;
-#endif
-       return NULL;
-}
-
-static inline u8 *bss_mesh_id(struct ieee80211_bss *bss)
-{
-#ifdef CONFIG_MAC80211_MESH
-       return bss->mesh_id;
-#endif
-       return NULL;
-}
-
-static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss)
-{
-#ifdef CONFIG_MAC80211_MESH
-       return bss->mesh_id_len;
-#endif
-       return 0;
-}
-
-
 typedef unsigned __bitwise__ ieee80211_tx_result;
 #define TX_CONTINUE    ((__force ieee80211_tx_result) 0u)
 #define TX_DROP                ((__force ieee80211_tx_result) 1u)
@@ -380,6 +343,7 @@ struct ieee80211_mgd_auth_data {
        u8 key[WLAN_KEY_LEN_WEP104];
        u8 key_len, key_idx;
        bool done;
+       bool timeout_started;
 
        u16 sae_trans, sae_status;
        size_t data_len;
@@ -399,9 +363,9 @@ struct ieee80211_mgd_assoc_data {
        u8 ssid_len;
        u8 supp_rates_len;
        bool wmm, uapsd;
-       bool have_beacon;
-       bool sent_assoc;
+       bool have_beacon, need_beacon;
        bool synced;
+       bool timeout_started;
 
        u8 ap_ht_param;
 
@@ -425,6 +389,7 @@ struct ieee80211_if_managed {
        unsigned long probe_timeout;
        int probe_send_count;
        bool nullfunc_failed;
+       bool connection_loss;
 
        struct mutex mtx;
        struct cfg80211_bss *associated;
@@ -449,6 +414,10 @@ struct ieee80211_if_managed {
        bool beacon_crc_valid;
        u32 beacon_crc;
 
+       bool status_acked;
+       bool status_received;
+       __le16 status_fc;
+
        enum {
                IEEE80211_MFP_DISABLED,
                IEEE80211_MFP_OPTIONAL,
@@ -611,6 +580,9 @@ struct ieee80211_if_mesh {
        u32 mesh_seqnum;
        bool accepting_plinks;
        int num_gates;
+       struct beacon_data __rcu *beacon;
+       /* just protects beacon updates for now */
+       struct mutex mtx;
        const u8 *ie;
        u8 ie_len;
        enum {
@@ -623,6 +595,11 @@ struct ieee80211_if_mesh {
        s64 sync_offset_clockdrift_max;
        spinlock_t sync_offset_lock;
        bool adjusting_tbtt;
+       /* mesh power save */
+       enum nl80211_mesh_power_mode nonpeer_pm;
+       int ps_peers_light_sleep;
+       int ps_peers_deep_sleep;
+       struct ps_data ps;
 };
 
 #ifdef CONFIG_MAC80211_MESH
@@ -717,9 +694,6 @@ struct ieee80211_sub_if_data {
 
        char name[IFNAMSIZ];
 
-       /* to detect idle changes */
-       bool old_idle;
-
        /* Fragment table for host-based reassembly */
        struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
        unsigned int fragment_next;
@@ -747,14 +721,15 @@ struct ieee80211_sub_if_data {
        struct work_struct work;
        struct sk_buff_head skb_queue;
 
-       bool arp_filter_state;
-
        u8 needed_rx_chains;
        enum ieee80211_smps_mode smps_mode;
 
        int user_power_level; /* in dBm */
        int ap_power_level; /* in dBm */
 
+       bool radar_required;
+       struct delayed_work dfs_cac_timer_work;
+
        /*
         * AP this belongs to: self in AP mode and
         * corresponding AP in VLAN mode, NULL for
@@ -842,6 +817,7 @@ enum queue_stop_reason {
        IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
        IEEE80211_QUEUE_STOP_REASON_SUSPEND,
        IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
 };
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -974,6 +950,10 @@ struct ieee80211_local {
        /* wowlan is enabled -- don't reconfig on resume */
        bool wowlan;
 
+       /* DFS/radar detection is enabled */
+       bool radar_detect_enabled;
+       struct work_struct radar_detected_work;
+
        /* number of RX chains the hardware has */
        u8 rx_chains;
 
@@ -988,14 +968,7 @@ struct ieee80211_local {
        struct sk_buff_head skb_queue;
        struct sk_buff_head skb_queue_unreliable;
 
-       /*
-        * Internal FIFO queue which is shared between multiple rx path
-        * stages. Its main task is to provide a serialization mechanism,
-        * so all rx handlers can enjoy having exclusive access to their
-        * private data structures.
-        */
-       struct sk_buff_head rx_skb_queue;
-       bool running_rx_handler;        /* protected by rx_skb_queue.lock */
+       spinlock_t rx_path_lock;
 
        /* Station data */
        /*
@@ -1129,14 +1102,13 @@ struct ieee80211_local {
        struct timer_list dynamic_ps_timer;
        struct notifier_block network_latency_notifier;
        struct notifier_block ifa_notifier;
+       struct notifier_block ifa6_notifier;
 
        /*
         * The dynamic ps timeout configured from user space via WEXT -
         * this will override whatever chosen by mac80211 internally.
         */
        int dynamic_ps_forced_timeout;
-       int dynamic_ps_user_timeout;
-       bool disable_dynamic_ps;
 
        int user_power_level; /* in dBm, for all interfaces */
 
@@ -1194,40 +1166,41 @@ struct ieee80211_ra_tid {
 
 /* Parsed Information Elements */
 struct ieee802_11_elems {
-       u8 *ie_start;
+       const u8 *ie_start;
        size_t total_len;
 
        /* pointers to IEs */
-       u8 *ssid;
-       u8 *supp_rates;
-       u8 *fh_params;
-       u8 *ds_params;
-       u8 *cf_params;
-       struct ieee80211_tim_ie *tim;
-       u8 *ibss_params;
-       u8 *challenge;
-       u8 *wpa;
-       u8 *rsn;
-       u8 *erp_info;
-       u8 *ext_supp_rates;
-       u8 *wmm_info;
-       u8 *wmm_param;
-       struct ieee80211_ht_cap *ht_cap_elem;
-       struct ieee80211_ht_operation *ht_operation;
-       struct ieee80211_vht_cap *vht_cap_elem;
-       struct ieee80211_vht_operation *vht_operation;
-       struct ieee80211_meshconf_ie *mesh_config;
-       u8 *mesh_id;
-       u8 *peering;
-       u8 *preq;
-       u8 *prep;
-       u8 *perr;
-       struct ieee80211_rann_ie *rann;
-       struct ieee80211_channel_sw_ie *ch_switch_ie;
-       u8 *country_elem;
-       u8 *pwr_constr_elem;
-       u8 *quiet_elem; /* first quite element */
-       u8 *timeout_int;
+       const u8 *ssid;
+       const u8 *supp_rates;
+       const u8 *fh_params;
+       const u8 *ds_params;
+       const u8 *cf_params;
+       const struct ieee80211_tim_ie *tim;
+       const u8 *ibss_params;
+       const u8 *challenge;
+       const u8 *rsn;
+       const u8 *erp_info;
+       const u8 *ext_supp_rates;
+       const u8 *wmm_info;
+       const u8 *wmm_param;
+       const struct ieee80211_ht_cap *ht_cap_elem;
+       const struct ieee80211_ht_operation *ht_operation;
+       const struct ieee80211_vht_cap *vht_cap_elem;
+       const struct ieee80211_vht_operation *vht_operation;
+       const struct ieee80211_meshconf_ie *mesh_config;
+       const u8 *mesh_id;
+       const u8 *peering;
+       const __le16 *awake_window;
+       const u8 *preq;
+       const u8 *prep;
+       const u8 *perr;
+       const struct ieee80211_rann_ie *rann;
+       const struct ieee80211_channel_sw_ie *ch_switch_ie;
+       const u8 *country_elem;
+       const u8 *pwr_constr_elem;
+       const u8 *quiet_elem;   /* first quiet element */
+       const u8 *timeout_int;
+       const u8 *opmode_notif;
 
        /* length of them, respectively */
        u8 ssid_len;
@@ -1238,7 +1211,6 @@ struct ieee802_11_elems {
        u8 tim_len;
        u8 ibss_params_len;
        u8 challenge_len;
-       u8 wpa_len;
        u8 rsn_len;
        u8 erp_info_len;
        u8 ext_supp_rates_len;
@@ -1307,10 +1279,10 @@ void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
 int ieee80211_max_network_latency(struct notifier_block *nb,
                                  unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
-void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
-                                     struct ieee80211_channel_sw_ie *sw_elem,
-                                     struct ieee80211_bss *bss,
-                                     u64 timestamp);
+void
+ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+                                const struct ieee80211_channel_sw_ie *sw_elem,
+                                struct ieee80211_bss *bss, u64 timestamp);
 void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
@@ -1319,6 +1291,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
+                                 __le16 fc, bool acked);
 
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1414,10 +1388,10 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
 /* HT */
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_sta_ht_cap *ht_cap);
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
                                       struct ieee80211_supported_band *sband,
-                                      struct ieee80211_ht_cap *ht_cap_ie,
-                                      struct ieee80211_sta_ht_cap *ht_cap);
+                                      const struct ieee80211_ht_cap *ht_cap_ie,
+                                      struct sta_info *sta);
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
                          const u8 *da, u16 tid,
                          u16 initiator, u16 reason_code);
@@ -1457,10 +1431,17 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
 u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
 
 /* VHT */
-void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
-                                        struct ieee80211_supported_band *sband,
-                                        struct ieee80211_vht_cap *vht_cap_ie,
-                                        struct ieee80211_sta_vht_cap *vht_cap);
+void
+ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_supported_band *sband,
+                                   const struct ieee80211_vht_cap *vht_cap_ie,
+                                   struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
+void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+                                struct sta_info *sta, u8 opmode,
+                                enum ieee80211_band band, bool nss_only);
+
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
                                       struct ieee80211_mgmt *mgmt,
@@ -1578,8 +1559,9 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
-                        u8 *extra, size_t extra_len, const u8 *bssid,
-                        const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
+                        const u8 *extra, size_t extra_len, const u8 *bssid,
+                        const u8 *da, const u8 *key, u8 key_len, u8 key_idx,
+                        u32 tx_flags);
 void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
                                    const u8 *bssid, u16 stype, u16 reason,
                                    bool send_frame, u8 *frame_buf);
@@ -1596,7 +1578,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck,
+                             u32 ratemask, bool directed, u32 tx_flags,
                              struct ieee80211_channel *channel, bool scan);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1628,18 +1610,31 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 
 /* channel management */
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                 struct ieee80211_ht_operation *ht_oper,
+                                 const struct ieee80211_ht_operation *ht_oper,
                                  struct cfg80211_chan_def *chandef);
 
 int __must_check
 ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
+int __must_check
+ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
+                              const struct cfg80211_chan_def *chandef,
+                              u32 *changed);
 void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+                                        bool clear);
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
+void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
+                                   struct ieee80211_chanctx *chanctx);
+
+void ieee80211_dfs_cac_timer(unsigned long data);
+void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+void ieee80211_dfs_radar_detected_work(struct work_struct *work);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
index 06fac29..86c8308 100644 (file)
@@ -78,8 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
-static u32 ieee80211_idle_off(struct ieee80211_local *local,
-                             const char *reason)
+static u32 ieee80211_idle_off(struct ieee80211_local *local)
 {
        if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
                return 0;
@@ -99,110 +98,45 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
        return IEEE80211_CONF_CHANGE_IDLE;
 }
 
-static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
+void ieee80211_recalc_idle(struct ieee80211_local *local)
 {
-       struct ieee80211_sub_if_data *sdata;
-       int count = 0;
-       bool working = false, scanning = false;
+       bool working = false, scanning, active;
        unsigned int led_trig_start = 0, led_trig_stop = 0;
        struct ieee80211_roc_work *roc;
+       u32 change;
 
-#ifdef CONFIG_PROVE_LOCKING
-       WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
-               !lockdep_is_held(&local->iflist_mtx));
-#endif
        lockdep_assert_held(&local->mtx);
 
-       list_for_each_entry(sdata, &local->interfaces, list) {
-               if (!ieee80211_sdata_running(sdata)) {
-                       sdata->vif.bss_conf.idle = true;
-                       continue;
-               }
-
-               sdata->old_idle = sdata->vif.bss_conf.idle;
-
-               /* do not count disabled managed interfaces */
-               if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-                   !sdata->u.mgd.associated &&
-                   !sdata->u.mgd.auth_data &&
-                   !sdata->u.mgd.assoc_data) {
-                       sdata->vif.bss_conf.idle = true;
-                       continue;
-               }
-               /* do not count unused IBSS interfaces */
-               if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
-                   !sdata->u.ibss.ssid_len) {
-                       sdata->vif.bss_conf.idle = true;
-                       continue;
-               }
-
-               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
-                       continue;
-
-               /* count everything else */
-               sdata->vif.bss_conf.idle = false;
-               count++;
-       }
+       active = !list_empty(&local->chanctx_list);
 
        if (!local->ops->remain_on_channel) {
                list_for_each_entry(roc, &local->roc_list, list) {
                        working = true;
-                       roc->sdata->vif.bss_conf.idle = false;
+                       break;
                }
        }
 
-       sdata = rcu_dereference_protected(local->scan_sdata,
-                                         lockdep_is_held(&local->mtx));
-       if (sdata && !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
-               scanning = true;
-               sdata->vif.bss_conf.idle = false;
-       }
-
-       list_for_each_entry(sdata, &local->interfaces, list) {
-               if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-                   sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
-                       continue;
-               if (sdata->old_idle == sdata->vif.bss_conf.idle)
-                       continue;
-               if (!ieee80211_sdata_running(sdata))
-                       continue;
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
-       }
+       scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+                  test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
 
        if (working || scanning)
                led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
        else
                led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
 
-       if (count)
+       if (active)
                led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
        else
                led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
 
        ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
 
-       if (working)
-               return ieee80211_idle_off(local, "working");
-       if (scanning)
-               return ieee80211_idle_off(local, "scanning");
-       if (!count)
-               return ieee80211_idle_on(local);
+       if (working || scanning || active)
+               change = ieee80211_idle_off(local);
        else
-               return ieee80211_idle_off(local, "in use");
-
-       return 0;
-}
-
-void ieee80211_recalc_idle(struct ieee80211_local *local)
-{
-       u32 chg;
-
-       mutex_lock(&local->iflist_mtx);
-       chg = __ieee80211_recalc_idle(local);
-       mutex_unlock(&local->iflist_mtx);
-       if (chg)
-               ieee80211_hw_config(local, chg);
+               change = ieee80211_idle_on(local);
+       if (change)
+               ieee80211_hw_config(local, change);
 }
 
 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
@@ -621,6 +555,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                                goto err_del_interface;
                }
 
+               drv_add_interface_debugfs(local, sdata);
+
                if (sdata->vif.type == NL80211_IFTYPE_AP) {
                        local->fif_pspoll++;
                        local->fif_probe_req++;
@@ -694,10 +630,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
        if (sdata->flags & IEEE80211_SDATA_PROMISC)
                atomic_inc(&local->iff_promiscs);
 
-       mutex_lock(&local->mtx);
-       hw_reconf_flags |= __ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
-
        if (coming_up)
                local->open_count++;
 
@@ -748,6 +680,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        struct sk_buff *skb, *tmp;
        u32 hw_reconf_flags = 0;
        int i, flushed;
+       struct ps_data *ps;
 
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
@@ -817,6 +750,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
        cancel_work_sync(&sdata->recalc_smps);
 
+       cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+
+       if (sdata->wdev.cac_started) {
+               mutex_lock(&local->iflist_mtx);
+               ieee80211_vif_release_channel(sdata);
+               mutex_unlock(&local->iflist_mtx);
+               cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
+                                  GFP_KERNEL);
+       }
+
        /* APs need special treatment */
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                struct ieee80211_sub_if_data *vlan, *tmpsdata;
@@ -826,6 +769,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                                         u.vlan.list)
                        dev_close(vlan->dev);
                WARN_ON(!list_empty(&sdata->u.ap.vlans));
+       } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               /* remove all packets in parent bc_buf pointing to this dev */
+               ps = &sdata->bss->ps;
+
+               spin_lock_irqsave(&ps->bc_buf.lock, flags);
+               skb_queue_walk_safe(&ps->bc_buf, skb, tmp) {
+                       if (skb->dev == sdata->dev) {
+                               __skb_unlink(skb, &ps->bc_buf);
+                               local->total_ps_buffered--;
+                               ieee80211_free_txskb(&local->hw, skb);
+                       }
+               }
+               spin_unlock_irqrestore(&ps->bc_buf.lock, flags);
        } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
                ieee80211_mgd_stop(sdata);
        }
@@ -882,16 +838,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                 */
                ieee80211_free_keys(sdata);
 
+               drv_remove_interface_debugfs(local, sdata);
+
                if (going_down)
                        drv_remove_interface(local, sdata);
        }
 
        sdata->bss = NULL;
 
-       mutex_lock(&local->mtx);
-       hw_reconf_flags |= __ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
-
        ieee80211_recalc_ps(local, -1);
 
        if (local->open_count == 0) {
@@ -1574,9 +1528,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        /* initialise type-independent data */
        sdata->wdev.wiphy = local->hw.wiphy;
        sdata->local = local;
-#ifdef CONFIG_INET
-       sdata->arp_filter_state = true;
-#endif
 
        for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
                skb_queue_head_init(&sdata->fragments[i].skb_list);
@@ -1586,6 +1537,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        spin_lock_init(&sdata->cleanup_stations_lock);
        INIT_LIST_HEAD(&sdata->cleanup_stations);
        INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
+       INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
+                         ieee80211_dfs_cac_timer_work);
 
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                struct ieee80211_supported_band *sband;
index 619c5d6..ef252eb 100644 (file)
@@ -204,8 +204,11 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
        if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
                key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
-       if (uni)
+       if (uni) {
                rcu_assign_pointer(sdata->default_unicast_key, key);
+               drv_set_default_unicast_key(sdata->local, sdata, idx);
+       }
+
        if (multi)
                rcu_assign_pointer(sdata->default_multicast_key, key);
 
index 39cfe8f..f974768 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/inetdevice.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
+#include <net/addrconf.h>
 
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -33,8 +34,6 @@
 #include "cfg.h"
 #include "debugfs.h"
 
-static struct lock_class_key ieee80211_rx_skb_queue_class;
-
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
        u64 mc;
@@ -349,27 +348,19 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 
        /* Copy the addresses to the bss_conf list */
        ifa = idev->ifa_list;
-       while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
-               bss_conf->arp_addr_list[c] = ifa->ifa_address;
+       while (ifa) {
+               if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN)
+                       bss_conf->arp_addr_list[c] = ifa->ifa_address;
                ifa = ifa->ifa_next;
                c++;
        }
 
-       /* If not all addresses fit the list, disable filtering */
-       if (ifa) {
-               sdata->arp_filter_state = false;
-               c = 0;
-       } else {
-               sdata->arp_filter_state = true;
-       }
        bss_conf->arp_addr_cnt = c;
 
        /* Configure driver only if associated (which also implies it is up) */
-       if (ifmgd->associated) {
-               bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+       if (ifmgd->associated)
                ieee80211_bss_info_change_notify(sdata,
                                                 BSS_CHANGED_ARP_FILTER);
-       }
 
        mutex_unlock(&ifmgd->mtx);
 
@@ -377,6 +368,37 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_IPV6)
+static int ieee80211_ifa6_changed(struct notifier_block *nb,
+                                 unsigned long data, void *arg)
+{
+       struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg;
+       struct inet6_dev *idev = ifa->idev;
+       struct net_device *ndev = ifa->idev->dev;
+       struct ieee80211_local *local =
+               container_of(nb, struct ieee80211_local, ifa6_notifier);
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       struct ieee80211_sub_if_data *sdata;
+
+       /* Make sure it's our interface that got changed */
+       if (!wdev || wdev->wiphy != local->hw.wiphy)
+               return NOTIFY_DONE;
+
+       sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+
+       /*
+        * For now only support station mode. This is mostly because
+        * doing AP would have to handle AP_VLAN in some way ...
+        */
+       if (sdata->vif.type != NL80211_IFTYPE_STATION)
+               return NOTIFY_DONE;
+
+       drv_ipv6_addr_change(local, sdata, idev);
+
+       return NOTIFY_DONE;
+}
+#endif
+
 static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
 {
        struct ieee80211_local *local =
@@ -479,6 +501,11 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
        },
 };
 
+static const u8 extended_capabilities[] = {
+       0, 0, 0, 0, 0, 0, 0,
+       WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
                                        const struct ieee80211_ops *ops)
 {
@@ -535,14 +562,17 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
                        WIPHY_FLAG_REPORTS_OBSS |
                        WIPHY_FLAG_OFFCHAN_TX;
 
+       wiphy->extended_capabilities = extended_capabilities;
+       wiphy->extended_capabilities_mask = extended_capabilities;
+       wiphy->extended_capabilities_len = ARRAY_SIZE(extended_capabilities);
+
        if (ops->remain_on_channel)
                wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
        wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
                           NL80211_FEATURE_SAE |
                           NL80211_FEATURE_HT_IBSS |
-                          NL80211_FEATURE_VIF_TXPOWER |
-                          NL80211_FEATURE_FULL_AP_CLIENT_STATE;
+                          NL80211_FEATURE_VIF_TXPOWER;
 
        if (!ops->hw_scan)
                wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -589,25 +619,19 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        mutex_init(&local->key_mtx);
        spin_lock_init(&local->filter_lock);
+       spin_lock_init(&local->rx_path_lock);
        spin_lock_init(&local->queue_stop_reason_lock);
 
        INIT_LIST_HEAD(&local->chanctx_list);
        mutex_init(&local->chanctx_mtx);
 
-       /*
-        * The rx_skb_queue is only accessed from tasklets,
-        * but other SKB queues are used from within IRQ
-        * context. Therefore, this one needs a different
-        * locking class so our direct, non-irq-safe use of
-        * the queue's lock doesn't throw lockdep warnings.
-        */
-       skb_queue_head_init_class(&local->rx_skb_queue,
-                                 &ieee80211_rx_skb_queue_class);
-
        INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
 
        INIT_WORK(&local->restart_work, ieee80211_restart_work);
 
+       INIT_WORK(&local->radar_detected_work,
+                 ieee80211_dfs_radar_detected_work);
+
        INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
        local->smps_mode = IEEE80211_SMPS_OFF;
 
@@ -683,9 +707,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                return -EINVAL;
 #endif
 
-       if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
-               return -EINVAL;
-
        if (!local->use_chanctx) {
                for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
                        const struct ieee80211_iface_combination *comb;
@@ -703,6 +724,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                 */
                if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
                        return -EINVAL;
+
+               /* DFS currently not supported with channel context drivers */
+               for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
+                       const struct ieee80211_iface_combination *comb;
+
+                       comb = &local->hw.wiphy->iface_combinations[i];
+
+                       if (comb->radar_detect_widths)
+                               return -EINVAL;
+               }
        }
 
        /* Only HW csum features are currently compatible with mac80211 */
@@ -985,12 +1016,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                goto fail_ifa;
 #endif
 
+#if IS_ENABLED(CONFIG_IPV6)
+       local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed;
+       result = register_inet6addr_notifier(&local->ifa6_notifier);
+       if (result)
+               goto fail_ifa6;
+#endif
+
        netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
                        local->hw.napi_weight);
 
        return 0;
 
+#if IS_ENABLED(CONFIG_IPV6)
+ fail_ifa6:
 #ifdef CONFIG_INET
+       unregister_inetaddr_notifier(&local->ifa_notifier);
+#endif
+#endif
+#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
  fail_ifa:
        pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
                               &local->network_latency_notifier);
@@ -1026,6 +1070,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #ifdef CONFIG_INET
        unregister_inetaddr_notifier(&local->ifa_notifier);
 #endif
+#if IS_ENABLED(CONFIG_IPV6)
+       unregister_inet6addr_notifier(&local->ifa6_notifier);
+#endif
 
        rtnl_lock();
 
@@ -1049,7 +1096,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
                wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
-       skb_queue_purge(&local->rx_skb_queue);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index 694e273..a77d40e 100644 (file)
@@ -149,6 +149,31 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
        return changed;
 }
 
+/*
+ * mesh_sta_cleanup - clean up any mesh sta state
+ *
+ * @sta: mesh sta to clean up.
+ */
+void mesh_sta_cleanup(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed;
+
+       /*
+        * maybe userspace handles peer allocation and peering, but in either
+        * case the beacon is still generated by the kernel and we might need
+        * an update.
+        */
+       changed = mesh_accept_plinks_update(sdata);
+       if (sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
+               changed |= mesh_plink_deactivate(sta);
+               del_timer_sync(&sta->plink_timer);
+       }
+
+       if (changed)
+               ieee80211_mbss_info_change_notify(sdata, changed);
+}
+
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
 {
        int i;
@@ -261,6 +286,9 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
        *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
        *pos |= ifmsh->accepting_plinks ?
            IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
+       /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
+       *pos |= ifmsh->ps_peers_deep_sleep ?
+           IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
        *pos++ |= ifmsh->adjusting_tbtt ?
            IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
        *pos++ = 0x00;
@@ -286,6 +314,29 @@ mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
        return 0;
 }
 
+int mesh_add_awake_window_ie(struct sk_buff *skb,
+                            struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       u8 *pos;
+
+       /* see IEEE802.11-2012 13.14.6 */
+       if (ifmsh->ps_peers_light_sleep == 0 &&
+           ifmsh->ps_peers_deep_sleep == 0 &&
+           ifmsh->nonpeer_pm == NL80211_MESH_POWER_ACTIVE)
+               return 0;
+
+       if (skb_tailroom(skb) < 4)
+               return -ENOMEM;
+
+       pos = skb_put(skb, 2 + 2);
+       *pos++ = WLAN_EID_MESH_AWAKE_WINDOW;
+       *pos++ = 2;
+       put_unaligned_le16(ifmsh->mshcfg.dot11MeshAwakeWindowDuration, pos);
+
+       return 0;
+}
+
 int
 mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 {
@@ -342,8 +393,6 @@ mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 int mesh_add_ds_params_ie(struct sk_buff *skb,
                          struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_supported_band *sband;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
        u8 *pos;
@@ -360,13 +409,10 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
        chan = chanctx_conf->def.chan;
        rcu_read_unlock();
 
-       sband = local->hw.wiphy->bands[chan->band];
-       if (sband->band == IEEE80211_BAND_2GHZ) {
-               pos = skb_put(skb, 2 + 1);
-               *pos++ = WLAN_EID_DS_PARAMS;
-               *pos++ = 1;
-               *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
-       }
+       pos = skb_put(skb, 2 + 1);
+       *pos++ = WLAN_EID_DS_PARAMS;
+       *pos++ = 1;
+       *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
 
        return 0;
 }
@@ -547,7 +593,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
        mesh_path_expire(sdata);
 
        changed = mesh_accept_plinks_update(sdata);
-       ieee80211_bss_info_change_notify(sdata, changed);
+       ieee80211_mbss_info_change_notify(sdata, changed);
 
        mod_timer(&ifmsh->housekeeping_timer,
                  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -598,7 +644,140 @@ void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
 }
 #endif
 
-void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
+static int
+ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
+{
+       struct beacon_data *bcn;
+       int head_len, tail_len;
+       struct sk_buff *skb;
+       struct ieee80211_mgmt *mgmt;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum ieee80211_band band;
+       u8 *pos;
+       struct ieee80211_sub_if_data *sdata;
+       int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
+                     sizeof(mgmt->u.beacon);
+
+       sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       band = chanctx_conf->def.chan->band;
+       rcu_read_unlock();
+
+       head_len = hdr_len +
+                  2 + /* NULL SSID */
+                  2 + 8 + /* supported rates */
+                  2 + 3; /* DS params */
+       tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+                  2 + sizeof(struct ieee80211_ht_cap) +
+                  2 + sizeof(struct ieee80211_ht_operation) +
+                  2 + ifmsh->mesh_id_len +
+                  2 + sizeof(struct ieee80211_meshconf_ie) +
+                  2 + sizeof(__le16) + /* awake window */
+                  ifmsh->ie_len;
+
+       bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
+       /* need an skb for IE builders to operate on */
+       skb = dev_alloc_skb(max(head_len, tail_len));
+
+       if (!bcn || !skb)
+               goto out_free;
+
+       /*
+        * pointers go into the block we allocated,
+        * memory is | beacon_data | head | tail |
+        */
+       bcn->head = ((u8 *) bcn) + sizeof(*bcn);
+
+       /* fill in the head */
+       mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+       memset(mgmt, 0, hdr_len);
+       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                         IEEE80211_STYPE_BEACON);
+       eth_broadcast_addr(mgmt->da);
+       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+       memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
+       ieee80211_mps_set_frame_flags(sdata, NULL, (void *) mgmt);
+       mgmt->u.beacon.beacon_int =
+               cpu_to_le16(sdata->vif.bss_conf.beacon_int);
+       mgmt->u.beacon.capab_info |= cpu_to_le16(
+               sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
+
+       pos = skb_put(skb, 2);
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = 0x0;
+
+       if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+           mesh_add_ds_params_ie(skb, sdata))
+               goto out_free;
+
+       bcn->head_len = skb->len;
+       memcpy(bcn->head, skb->data, bcn->head_len);
+
+       /* now the tail */
+       skb_trim(skb, 0);
+       bcn->tail = bcn->head + bcn->head_len;
+
+       if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+           mesh_add_rsn_ie(skb, sdata) ||
+           mesh_add_ht_cap_ie(skb, sdata) ||
+           mesh_add_ht_oper_ie(skb, sdata) ||
+           mesh_add_meshid_ie(skb, sdata) ||
+           mesh_add_meshconf_ie(skb, sdata) ||
+           mesh_add_awake_window_ie(skb, sdata) ||
+           mesh_add_vendor_ies(skb, sdata))
+               goto out_free;
+
+       bcn->tail_len = skb->len;
+       memcpy(bcn->tail, skb->data, bcn->tail_len);
+
+       dev_kfree_skb(skb);
+       rcu_assign_pointer(ifmsh->beacon, bcn);
+       return 0;
+out_free:
+       kfree(bcn);
+       dev_kfree_skb(skb);
+       return -ENOMEM;
+}
+
+static int
+ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh)
+{
+       struct ieee80211_sub_if_data *sdata;
+       struct beacon_data *old_bcn;
+       int ret;
+       sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
+
+       mutex_lock(&ifmsh->mtx);
+
+       old_bcn = rcu_dereference_protected(ifmsh->beacon,
+                                           lockdep_is_held(&ifmsh->mtx));
+       ret = ieee80211_mesh_build_beacon(ifmsh);
+       if (ret)
+               /* just reuse old beacon */
+               goto out;
+
+       if (old_bcn)
+               kfree_rcu(old_bcn, rcu_head);
+out:
+       mutex_unlock(&ifmsh->mtx);
+       return ret;
+}
+
+void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+                                      u32 changed)
+{
+       if (sdata->vif.bss_conf.enable_beacon &&
+           (changed & (BSS_CHANGED_BEACON |
+                       BSS_CHANGED_HT |
+                       BSS_CHANGED_BASIC_RATES |
+                       BSS_CHANGED_BEACON_INT)))
+               if (ieee80211_mesh_rebuild_beacon(&sdata->u.mesh))
+                       return;
+       ieee80211_bss_info_change_notify(sdata, changed);
+}
+
+int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
@@ -629,20 +808,24 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(local, band);
 
-       if (band == IEEE80211_BAND_5GHZ) {
-               sdata->vif.bss_conf.use_short_slot = true;
-               changed |= BSS_CHANGED_ERP_SLOT;
+       changed |= ieee80211_mps_local_status_update(sdata);
+
+       if (ieee80211_mesh_build_beacon(ifmsh)) {
+               ieee80211_stop_mesh(sdata);
+               return -ENOMEM;
        }
 
        ieee80211_bss_info_change_notify(sdata, changed);
 
        netif_carrier_on(sdata->dev);
+       return 0;
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct beacon_data *bcn;
 
        netif_carrier_off(sdata->dev);
 
@@ -651,11 +834,21 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.enable_beacon = false;
        clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+       mutex_lock(&ifmsh->mtx);
+       bcn = rcu_dereference_protected(ifmsh->beacon,
+                                       lockdep_is_held(&ifmsh->mtx));
+       rcu_assign_pointer(ifmsh->beacon, NULL);
+       kfree_rcu(bcn, rcu_head);
+       mutex_unlock(&ifmsh->mtx);
 
        /* flush STAs and mpaths on this iface */
        sta_info_flush(sdata);
        mesh_path_flush_by_iface(sdata);
 
+       /* free all potentially still buffered group-addressed frames */
+       local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
+       skb_queue_purge(&ifmsh->ps.bc_buf);
+
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_timer);
@@ -675,6 +868,63 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->u.mesh.timers_running = 0;
 }
 
+static void
+ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
+                           struct ieee80211_mgmt *mgmt, size_t len)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct sk_buff *presp;
+       struct beacon_data *bcn;
+       struct ieee80211_mgmt *hdr;
+       struct ieee802_11_elems elems;
+       size_t baselen;
+       u8 *pos, *end;
+
+       end = ((u8 *) mgmt) + len;
+       pos = mgmt->u.probe_req.variable;
+       baselen = (u8 *) pos - (u8 *) mgmt;
+       if (baselen > len)
+               return;
+
+       ieee802_11_parse_elems(pos, len - baselen, &elems);
+
+       /* 802.11-2012 10.1.4.3.2 */
+       if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
+            !is_broadcast_ether_addr(mgmt->da)) ||
+           elems.ssid_len != 0)
+               return;
+
+       if (elems.mesh_id_len != 0 &&
+           (elems.mesh_id_len != ifmsh->mesh_id_len ||
+            memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
+               return;
+
+       rcu_read_lock();
+       bcn = rcu_dereference(ifmsh->beacon);
+
+       if (!bcn)
+               goto out;
+
+       presp = dev_alloc_skb(local->tx_headroom +
+                             bcn->head_len + bcn->tail_len);
+       if (!presp)
+               goto out;
+
+       skb_reserve(presp, local->tx_headroom);
+       memcpy(skb_put(presp, bcn->head_len), bcn->head, bcn->head_len);
+       memcpy(skb_put(presp, bcn->tail_len), bcn->tail, bcn->tail_len);
+       hdr = (struct ieee80211_mgmt *) presp->data;
+       hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                        IEEE80211_STYPE_PROBE_RESP);
+       memcpy(hdr->da, mgmt->sa, ETH_ALEN);
+       mpl_dbg(sdata, "sending probe resp. to %pM\n", hdr->da);
+       IEEE80211_SKB_CB(presp)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+       ieee80211_tx_skb(sdata, presp);
+out:
+       rcu_read_unlock();
+}
+
 static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                                        u16 stype,
                                        struct ieee80211_mgmt *mgmt,
@@ -764,6 +1014,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
                                            rx_status);
                break;
+       case IEEE80211_STYPE_PROBE_REQ:
+               ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len);
+               break;
        case IEEE80211_STYPE_ACTION:
                ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
                break;
@@ -833,8 +1086,11 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
                    ieee80211_mesh_path_root_timer,
                    (unsigned long) sdata);
        INIT_LIST_HEAD(&ifmsh->preq_queue.list);
+       skb_queue_head_init(&ifmsh->ps.bc_buf);
        spin_lock_init(&ifmsh->mesh_preq_queue_lock);
        spin_lock_init(&ifmsh->sync_offset_lock);
+       RCU_INIT_POINTER(ifmsh->beacon, NULL);
+       mutex_init(&ifmsh->mtx);
 
        sdata->vif.bss_conf.bssid = zero_addr;
 }
index aff3015..1a1da87 100644 (file)
@@ -222,6 +222,8 @@ int mesh_add_meshid_ie(struct sk_buff *skb,
                       struct ieee80211_sub_if_data *sdata);
 int mesh_add_rsn_ie(struct sk_buff *skb,
                    struct ieee80211_sub_if_data *sdata);
+int mesh_add_awake_window_ie(struct sk_buff *skb,
+                            struct ieee80211_sub_if_data *sdata);
 int mesh_add_vendor_ies(struct sk_buff *skb,
                        struct ieee80211_sub_if_data *sdata);
 int mesh_add_ds_params_ie(struct sk_buff *skb,
@@ -237,10 +239,28 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
                struct sta_info *sta, struct sk_buff *skb);
 void ieee80211s_stop(void);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
-void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
+int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
 const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
+/* wrapper for ieee80211_bss_info_change_notify() */
+void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+                                      u32 changed);
+
+/* mesh power save */
+u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata);
+u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
+                                  enum nl80211_mesh_power_mode pm);
+void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
+                                  struct sta_info *sta,
+                                  struct ieee80211_hdr *hdr);
+void ieee80211_mps_sta_status_update(struct sta_info *sta);
+void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
+                                   struct ieee80211_hdr *hdr);
+void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
+                                   bool tx, bool acked);
+void ieee80211_mps_frame_release(struct sta_info *sta,
+                                struct ieee802_11_elems *elems);
 
 /* Mesh paths */
 int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -248,8 +268,8 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
 int mesh_nexthop_resolve(struct sk_buff *skb,
                         struct ieee80211_sub_if_data *sdata);
 void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
-struct mesh_path *mesh_path_lookup(u8 *dst,
-               struct ieee80211_sub_if_data *sdata);
+struct mesh_path *mesh_path_lookup(const u8 *dst,
+                                  struct ieee80211_sub_if_data *sdata);
 struct mesh_path *mpp_path_lookup(u8 *dst,
                                  struct ieee80211_sub_if_data *sdata);
 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata);
@@ -259,7 +279,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
 void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_mgmt *mgmt, size_t len);
-int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
+int mesh_path_add(const u8 *dst, struct ieee80211_sub_if_data *sdata);
 
 int mesh_path_add_gate(struct mesh_path *mpath);
 int mesh_path_send_to_gates(struct mesh_path *mpath);
@@ -271,20 +291,22 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
 u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
-void mesh_plink_deactivate(struct sta_info *sta);
-int mesh_plink_open(struct sta_info *sta);
-void mesh_plink_block(struct sta_info *sta);
+u32 mesh_plink_deactivate(struct sta_info *sta);
+u32 mesh_plink_open(struct sta_info *sta);
+u32 mesh_plink_block(struct sta_info *sta);
 void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
                         struct ieee80211_mgmt *mgmt, size_t len,
                         struct ieee80211_rx_status *rx_status);
+void mesh_sta_cleanup(struct sta_info *sta);
 
 /* Private interfaces */
 /* Mesh tables */
 void mesh_mpath_table_grow(void);
 void mesh_mpp_table_grow(void);
 /* Mesh paths */
-int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
-                      const u8 *ra, struct ieee80211_sub_if_data *sdata);
+int mesh_path_error_tx(u8 ttl, const u8 *target, __le32 target_sn,
+                      __le16 target_rcode, const u8 *ra,
+                      struct ieee80211_sub_if_data *sdata);
 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
 void mesh_path_flush_pending(struct mesh_path *mpath);
 void mesh_path_tx_pending(struct mesh_path *mpath);
index 6b4603a..585c1e2 100644 (file)
 
 static void mesh_queue_preq(struct mesh_path *, u8);
 
-static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
+static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
 {
        if (ae)
                offset += 6;
        return get_unaligned_le32(preq_elem + offset);
 }
 
-static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
+static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae)
 {
        if (ae)
                offset += 6;
@@ -102,10 +102,13 @@ enum mpath_frame_type {
 static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
 static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
-               u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
-               __le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
-               __le32 lifetime, __le32 metric, __le32 preq_id,
-               struct ieee80211_sub_if_data *sdata)
+                                 const u8 *orig_addr, __le32 orig_sn,
+                                 u8 target_flags, const u8 *target,
+                                 __le32 target_sn, const u8 *da,
+                                 u8 hop_count, u8 ttl,
+                                 __le32 lifetime, __le32 metric,
+                                 __le32 preq_id,
+                                 struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
@@ -205,6 +208,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
                struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
        skb_set_mac_header(skb, 0);
        skb_set_network_header(skb, 0);
@@ -217,6 +221,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
        info->control.vif = &sdata->vif;
        info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
        ieee80211_set_qos_hdr(sdata, skb);
+       ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
 }
 
 /**
@@ -233,7 +238,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
  * also acquires in the TX path.  To avoid a deadlock we don't transmit the
  * frame directly but add it to the pending queue instead.
  */
-int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
+int mesh_path_error_tx(u8 ttl, const u8 *target, __le32 target_sn,
                       __le16 target_rcode, const u8 *ra,
                       struct ieee80211_sub_if_data *sdata)
 {
@@ -367,14 +372,14 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
  * path routing information is updated.
  */
 static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
-                           struct ieee80211_mgmt *mgmt,
-                           u8 *hwmp_ie, enum mpath_frame_type action)
+                              struct ieee80211_mgmt *mgmt,
+                              const u8 *hwmp_ie, enum mpath_frame_type action)
 {
        struct ieee80211_local *local = sdata->local;
        struct mesh_path *mpath;
        struct sta_info *sta;
        bool fresh_info;
-       u8 *orig_addr, *ta;
+       const u8 *orig_addr, *ta;
        u32 orig_sn, orig_metric;
        unsigned long orig_lifetime, exp_time;
        u32 last_hop_metric, new_metric;
@@ -509,11 +514,11 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 
 static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_mgmt *mgmt,
-                                   u8 *preq_elem, u32 metric)
+                                   const u8 *preq_elem, u32 metric)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_path *mpath = NULL;
-       u8 *target_addr, *orig_addr;
+       const u8 *target_addr, *orig_addr;
        const u8 *da;
        u8 target_flags, ttl, flags;
        u32 orig_sn, target_sn, lifetime, orig_metric;
@@ -646,11 +651,11 @@ next_hop_deref_protected(struct mesh_path *mpath)
 
 static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_mgmt *mgmt,
-                                   u8 *prep_elem, u32 metric)
+                                   const u8 *prep_elem, u32 metric)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_path *mpath;
-       u8 *target_addr, *orig_addr;
+       const u8 *target_addr, *orig_addr;
        u8 ttl, hopcount, flags;
        u8 next_hop[ETH_ALEN];
        u32 target_sn, orig_sn, lifetime;
@@ -709,12 +714,13 @@ fail:
 }
 
 static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
-                            struct ieee80211_mgmt *mgmt, u8 *perr_elem)
+                                   struct ieee80211_mgmt *mgmt,
+                                   const u8 *perr_elem)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_path *mpath;
        u8 ttl;
-       u8 *ta, *target_addr;
+       const u8 *ta, *target_addr;
        u32 target_sn;
        u16 target_rcode;
 
@@ -756,15 +762,15 @@ endperr:
 }
 
 static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
-                               struct ieee80211_mgmt *mgmt,
-                               struct ieee80211_rann_ie *rann)
+                                   struct ieee80211_mgmt *mgmt,
+                                   const struct ieee80211_rann_ie *rann)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct mesh_path *mpath;
        u8 ttl, flags, hopcount;
-       u8 *orig_addr;
+       const u8 *orig_addr;
        u32 orig_sn, metric, metric_txsta, interval;
        bool root_is_gate;
 
@@ -1080,6 +1086,10 @@ int mesh_nexthop_resolve(struct sk_buff *skb,
        u8 *target_addr = hdr->addr3;
        int err = 0;
 
+       /* Nulls are only sent to peers for PS and should be pre-addressed */
+       if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+               return 0;
+
        rcu_read_lock();
        err = mesh_nexthop_lookup(skb, sdata);
        if (!err)
@@ -1151,6 +1161,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
        if (next_hop) {
                memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
                err = 0;
        }
 
index aa74981..2ce4c40 100644 (file)
@@ -181,7 +181,7 @@ errcopy:
        return -ENOMEM;
 }
 
-static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
 {
        /* Use last four bytes of hw addr and interface index as hash index */
@@ -212,6 +212,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
+               ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
        }
 
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
@@ -325,8 +326,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
 }
 
 
-static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
-                                         struct ieee80211_sub_if_data *sdata)
+static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
+                                     struct ieee80211_sub_if_data *sdata)
 {
        struct mesh_path *mpath;
        struct hlist_node *n;
@@ -358,7 +359,8 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
  *
  * Locking: must be called within a read rcu section.
  */
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+struct mesh_path *mesh_path_lookup(const u8 *dst,
+                                  struct ieee80211_sub_if_data *sdata)
 {
        return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
 }
@@ -493,7 +495,7 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
  *
  * State: the initial state of the new path is set to 0
  */
-int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
+int mesh_path_add(const u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
index 9e04166..f7526e5 100644 (file)
@@ -56,27 +56,63 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 }
 
 /*
- * Allocate mesh sta entry and insert into station table
+ * mesh_set_short_slot_time - enable / disable ERP short slot time.
+ *
+ * The standard indirectly mandates mesh STAs to turn off short slot time by
+ * disallowing advertising this (802.11-2012 8.4.1.4), but that doesn't mean we
+ * can't be sneaky about it. Enable short slot time if all mesh STAs in the
+ * MBSS support ERP rates.
+ *
+ * Returns BSS_CHANGED_ERP_SLOT or 0 for no change.
  */
-static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
-                                        u8 *hw_addr)
+static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
 {
+       struct ieee80211_local *local = sdata->local;
+       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
        struct sta_info *sta;
+       u32 erp_rates = 0, changed = 0;
+       int i;
+       bool short_slot = false;
 
-       if (sdata->local->num_sta >= MESH_MAX_PLINKS)
-               return NULL;
+       if (band == IEEE80211_BAND_5GHZ) {
+               /* (IEEE 802.11-2012 19.4.5) */
+               short_slot = true;
+               goto out;
+       } else if (band != IEEE80211_BAND_2GHZ ||
+                  (band == IEEE80211_BAND_2GHZ &&
+                   local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+               goto out;
 
-       sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
-       if (!sta)
-               return NULL;
+       for (i = 0; i < sband->n_bitrates; i++)
+               if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
+                       erp_rates |= BIT(i);
 
-       sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
-       sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
-       sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
+       if (!erp_rates)
+               goto out;
 
-       set_sta_flag(sta, WLAN_STA_WME);
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &local->sta_list, list) {
+               if (sdata != sta->sdata ||
+                   sta->plink_state != NL80211_PLINK_ESTAB)
+                       continue;
 
-       return sta;
+               short_slot = false;
+               if (erp_rates & sta->sta.supp_rates[band])
+                       short_slot = true;
+                else
+                       break;
+       }
+       rcu_read_unlock();
+
+out:
+       if (sdata->vif.bss_conf.use_short_slot != short_slot) {
+               sdata->vif.bss_conf.use_short_slot = short_slot;
+               changed = BSS_CHANGED_ERP_SLOT;
+               mpl_dbg(sdata, "mesh_plink %pM: ERP short slot time %d\n",
+                       sdata->vif.addr, short_slot);
+       }
+       return changed;
 }
 
 /**
@@ -165,6 +201,9 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
        sta->plink_state = NL80211_PLINK_BLOCKED;
        mesh_path_flush_by_nexthop(sta);
 
+       ieee80211_mps_sta_status_update(sta);
+       changed |= ieee80211_mps_local_status_update(sdata);
+
        return changed;
 }
 
@@ -175,7 +214,7 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
  *
  * All mesh paths with this peer as next hop will be flushed
  */
-void mesh_plink_deactivate(struct sta_info *sta)
+u32 mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        u32 changed;
@@ -188,7 +227,7 @@ void mesh_plink_deactivate(struct sta_info *sta)
                            sta->reason);
        spin_unlock_bh(&sta->lock);
 
-       ieee80211_bss_info_change_notify(sdata, changed);
+       return changed;
 }
 
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -309,59 +348,32 @@ free:
        return err;
 }
 
-/**
- * mesh_peer_init - initialize new mesh peer and return resulting sta_info
- *
- * @sdata: local meshif
- * @addr: peer's address
- * @elems: IEs from beacon or mesh peering frame
- *
- * call under RCU
- */
-static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
-                                      u8 *addr,
-                                      struct ieee802_11_elems *elems)
+static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
+                              struct sta_info *sta,
+                              struct ieee802_11_elems *elems, bool insert)
 {
        struct ieee80211_local *local = sdata->local;
        enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband;
-       u32 rates, basic_rates = 0;
-       struct sta_info *sta;
-       bool insert = false;
+       u32 rates, basic_rates = 0, changed = 0;
 
        sband = local->hw.wiphy->bands[band];
        rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
 
-       sta = sta_info_get(sdata, addr);
-       if (!sta) {
-               /* Userspace handles peer allocation when security is enabled */
-               if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
-                       cfg80211_notify_new_peer_candidate(sdata->dev, addr,
-                                                          elems->ie_start,
-                                                          elems->total_len,
-                                                          GFP_ATOMIC);
-                       return NULL;
-               }
-
-               sta = mesh_plink_alloc(sdata, addr);
-               if (!sta)
-                       return NULL;
-               insert = true;
-       }
-
        spin_lock_bh(&sta->lock);
        sta->last_rx = jiffies;
-       if (sta->plink_state == NL80211_PLINK_ESTAB) {
-               spin_unlock_bh(&sta->lock);
-               return sta;
-       }
 
+       /* rates and capabilities don't change during peering */
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               goto out;
+
+       if (sta->sta.supp_rates[band] != rates)
+               changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
        sta->sta.supp_rates[band] = rates;
        if (elems->ht_cap_elem &&
            sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-                                                 elems->ht_cap_elem,
-                                                 &sta->sta.ht_cap);
+                                                 elems->ht_cap_elem, sta);
        else
                memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
 
@@ -370,31 +382,119 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
 
                if (!(elems->ht_operation->ht_param &
                      IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
-                       sta->sta.ht_cap.cap &=
-                                           ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
                ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
                                             elems->ht_operation, &chandef);
+               if (sta->ch_width != chandef.width)
+                       changed |= IEEE80211_RC_BW_CHANGED;
                sta->ch_width = chandef.width;
        }
 
        if (insert)
                rate_control_rate_init(sta);
+       else
+               rate_control_rate_update(local, sband, sta, changed);
+out:
        spin_unlock_bh(&sta->lock);
+}
 
-       if (insert && sta_info_insert(sta))
+static struct sta_info *
+__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
+{
+       struct sta_info *sta;
+
+       if (sdata->local->num_sta >= MESH_MAX_PLINKS)
                return NULL;
 
+       sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
+       if (!sta)
+               return NULL;
+
+       sta->plink_state = NL80211_PLINK_LISTEN;
+       init_timer(&sta->plink_timer);
+
+       sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+       sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+       sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+       set_sta_flag(sta, WLAN_STA_WME);
+
+       return sta;
+}
+
+static struct sta_info *
+mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
+                   struct ieee802_11_elems *elems)
+{
+       struct sta_info *sta = NULL;
+
+       /* Userspace handles peer allocation when security is enabled */
+       if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
+               cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+                                                  elems->ie_start,
+                                                  elems->total_len,
+                                                  GFP_KERNEL);
+       else
+               sta = __mesh_sta_info_alloc(sdata, addr);
+
+       return sta;
+}
+
+/*
+ * mesh_sta_info_get - return mesh sta info entry for @addr.
+ *
+ * @sdata: local meshif
+ * @addr: peer's address
+ * @elems: IEs from beacon or mesh peering frame.
+ *
+ * Return existing or newly allocated sta_info under RCU read lock.
+ * (re)initialize with given IEs.
+ */
+static struct sta_info *
+mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
+                 u8 *addr, struct ieee802_11_elems *elems) __acquires(RCU)
+{
+       struct sta_info *sta = NULL;
+
+       rcu_read_lock();
+       sta = sta_info_get(sdata, addr);
+       if (sta) {
+               mesh_sta_info_init(sdata, sta, elems, false);
+       } else {
+               rcu_read_unlock();
+               /* can't run atomic */
+               sta = mesh_sta_info_alloc(sdata, addr, elems);
+               if (!sta) {
+                       rcu_read_lock();
+                       return NULL;
+               }
+
+               mesh_sta_info_init(sdata, sta, elems, true);
+
+               if (sta_info_insert_rcu(sta))
+                       return NULL;
+       }
+
        return sta;
 }
 
+/*
+ * mesh_neighbour_update - update or initialize new mesh neighbor.
+ *
+ * @sdata: local meshif
+ * @addr: peer's address
+ * @elems: IEs from beacon or mesh peering frame
+ *
+ * Initiates peering if appropriate.
+ */
 void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
                           u8 *hw_addr,
                           struct ieee802_11_elems *elems)
 {
        struct sta_info *sta;
+       u32 changed = 0;
 
-       rcu_read_lock();
-       sta = mesh_peer_init(sdata, hw_addr, elems);
+       sta = mesh_sta_info_get(sdata, hw_addr, elems);
        if (!sta)
                goto out;
 
@@ -403,10 +503,12 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
            sdata->u.mesh.accepting_plinks &&
            sdata->u.mesh.mshcfg.auto_open_plinks &&
            rssi_threshold_check(sta, sdata))
-               mesh_plink_open(sta);
+               changed = mesh_plink_open(sta);
 
+       ieee80211_mps_frame_release(sta, elems);
 out:
        rcu_read_unlock();
+       ieee80211_mbss_info_change_notify(sdata, changed);
 }
 
 static void mesh_plink_timer(unsigned long data)
@@ -490,6 +592,13 @@ static void mesh_plink_timer(unsigned long data)
 #ifdef CONFIG_PM
 void mesh_plink_quiesce(struct sta_info *sta)
 {
+       if (!ieee80211_vif_is_mesh(&sta->sdata->vif))
+               return;
+
+       /* no kernel mesh sta timers have been initialized */
+       if (sta->sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+               return;
+
        if (del_timer_sync(&sta->plink_timer))
                sta->plink_timer_was_running = true;
 }
@@ -512,13 +621,14 @@ static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
        add_timer(&sta->plink_timer);
 }
 
-int mesh_plink_open(struct sta_info *sta)
+u32 mesh_plink_open(struct sta_info *sta)
 {
        __le16 llid;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed;
 
        if (!test_sta_flag(sta, WLAN_STA_AUTH))
-               return -EPERM;
+               return 0;
 
        spin_lock_bh(&sta->lock);
        get_random_bytes(&llid, 2);
@@ -526,7 +636,7 @@ int mesh_plink_open(struct sta_info *sta)
        if (sta->plink_state != NL80211_PLINK_LISTEN &&
            sta->plink_state != NL80211_PLINK_BLOCKED) {
                spin_unlock_bh(&sta->lock);
-               return -EBUSY;
+               return 0;
        }
        sta->plink_state = NL80211_PLINK_OPN_SNT;
        mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
@@ -535,13 +645,16 @@ int mesh_plink_open(struct sta_info *sta)
                "Mesh plink: starting establishment with %pM\n",
                sta->sta.addr);
 
-       return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
-                                  sta->sta.addr, llid, 0, 0);
+       /* set the non-peer mode to active during peering */
+       changed = ieee80211_mps_local_status_update(sdata);
+
+       mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
+                           sta->sta.addr, llid, 0, 0);
+       return changed;
 }
 
-void mesh_plink_block(struct sta_info *sta)
+u32 mesh_plink_block(struct sta_info *sta)
 {
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
        u32 changed;
 
        spin_lock_bh(&sta->lock);
@@ -549,7 +662,7 @@ void mesh_plink_block(struct sta_info *sta)
        sta->plink_state = NL80211_PLINK_BLOCKED;
        spin_unlock_bh(&sta->lock);
 
-       ieee80211_bss_info_change_notify(sdata, changed);
+       return changed;
 }
 
 
@@ -632,6 +745,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
            (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
                memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
 
+       /* WARNING: Only for sta pointer, is dropped & re-acquired */
        rcu_read_lock();
 
        sta = sta_info_get(sdata, mgmt->sa);
@@ -735,8 +849,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
        }
 
        if (event == OPN_ACPT) {
+               rcu_read_unlock();
                /* allocate sta entry if necessary and update info */
-               sta = mesh_peer_init(sdata, mgmt->sa, &elems);
+               sta = mesh_sta_info_get(sdata, mgmt->sa, &elems);
                if (!sta) {
                        mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
                        rcu_read_unlock();
@@ -766,6 +881,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        sta->llid = llid;
                        mesh_plink_timer_set(sta,
                                             mshcfg->dot11MeshRetryTimeout);
+
+                       /* set the non-peer mode to active during peering */
+                       changed |= ieee80211_mps_local_status_update(sdata);
+
                        spin_unlock_bh(&sta->lock);
                        mesh_plink_frame_tx(sdata,
                                            WLAN_SP_MESH_PEERING_OPEN,
@@ -856,8 +975,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        spin_unlock_bh(&sta->lock);
                        changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
+                       changed |= mesh_set_short_slot_time(sdata);
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
+                       ieee80211_mps_sta_status_update(sta);
+                       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                                      mshcfg->power_mode);
                        break;
                default:
                        spin_unlock_bh(&sta->lock);
@@ -891,11 +1014,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        spin_unlock_bh(&sta->lock);
                        changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
+                       changed |= mesh_set_short_slot_time(sdata);
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        mesh_plink_frame_tx(sdata,
                                            WLAN_SP_MESH_PEERING_CONFIRM,
                                            sta->sta.addr, llid, plid, 0);
+                       ieee80211_mps_sta_status_update(sta);
+                       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                                       mshcfg->power_mode);
                        break;
                default:
                        spin_unlock_bh(&sta->lock);
@@ -914,6 +1041,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
                        spin_unlock_bh(&sta->lock);
                        changed |= mesh_set_ht_prot_mode(sdata);
+                       changed |= mesh_set_short_slot_time(sdata);
                        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                                            sta->sta.addr, llid, plid, reason);
                        break;
@@ -962,5 +1090,5 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
        rcu_read_unlock();
 
        if (changed)
-               ieee80211_bss_info_change_notify(sdata, changed);
+               ieee80211_mbss_info_change_notify(sdata, changed);
 }
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
new file mode 100644 (file)
index 0000000..3b7bfc0
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
+ * Copyright 2012-2013, cozybit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "mesh.h"
+#include "wme.h"
+
+
+/* mesh PS management */
+
+/**
+ * mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave
+ */
+static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_hdr *nullfunc; /* use 4addr header */
+       struct sk_buff *skb;
+       int size = sizeof(*nullfunc);
+       __le16 fc;
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2);
+       if (!skb)
+               return NULL;
+       skb_reserve(skb, local->hw.extra_tx_headroom);
+
+       nullfunc = (struct ieee80211_hdr *) skb_put(skb, size);
+       fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+       ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr,
+                                     sdata->vif.addr);
+       nullfunc->frame_control = fc;
+       nullfunc->duration_id = 0;
+       /* no address resolution for this frame -> set addr 1 immediately */
+       memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+       memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
+       ieee80211_mps_set_frame_flags(sdata, sta, nullfunc);
+
+       return skb;
+}
+
+/**
+ * mps_qos_null_tx - send a QoS Null to indicate link-specific power mode
+ */
+static void mps_qos_null_tx(struct sta_info *sta)
+{
+       struct sk_buff *skb;
+
+       skb = mps_qos_null_get(sta);
+       if (!skb)
+               return;
+
+       mps_dbg(sta->sdata, "announcing peer-specific power mode to %pM\n",
+               sta->sta.addr);
+
+       /* don't unintentionally start a MPSP */
+       if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+               u8 *qc = ieee80211_get_qos_ctl((void *) skb->data);
+
+               qc[0] |= IEEE80211_QOS_CTL_EOSP;
+       }
+
+       ieee80211_tx_skb(sta->sdata, skb);
+}
+
+/**
+ * ieee80211_mps_local_status_update - track status of local link-specific PMs
+ *
+ * @sdata: local mesh subif
+ *
+ * sets the non-peer power mode and triggers the driver PS (re-)configuration
+ * Return BSS_CHANGED_BEACON if a beacon update is necessary.
+ */
+u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct sta_info *sta;
+       bool peering = false;
+       int light_sleep_cnt = 0;
+       int deep_sleep_cnt = 0;
+       u32 changed = 0;
+       enum nl80211_mesh_power_mode nonpeer_pm;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (sdata != sta->sdata)
+                       continue;
+
+               switch (sta->plink_state) {
+               case NL80211_PLINK_OPN_SNT:
+               case NL80211_PLINK_OPN_RCVD:
+               case NL80211_PLINK_CNF_RCVD:
+                       peering = true;
+                       break;
+               case NL80211_PLINK_ESTAB:
+                       if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+                               light_sleep_cnt++;
+                       else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+                               deep_sleep_cnt++;
+                       break;
+               default:
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       /*
+        * Set non-peer mode to active during peering/scanning/authentication
+        * (see IEEE802.11-2012 13.14.8.3). The non-peer mesh power mode is
+        * deep sleep if the local STA is in light or deep sleep towards at
+        * least one mesh peer (see 13.14.3.1). Otherwise, set it to the
+        * user-configured default value.
+        */
+       if (peering) {
+               mps_dbg(sdata, "setting non-peer PM to active for peering\n");
+               nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+       } else if (light_sleep_cnt || deep_sleep_cnt) {
+               mps_dbg(sdata, "setting non-peer PM to deep sleep\n");
+               nonpeer_pm = NL80211_MESH_POWER_DEEP_SLEEP;
+       } else {
+               mps_dbg(sdata, "setting non-peer PM to user value\n");
+               nonpeer_pm = ifmsh->mshcfg.power_mode;
+       }
+
+       /* need update if sleep counts move between 0 and non-zero */
+       if (ifmsh->nonpeer_pm != nonpeer_pm ||
+           !ifmsh->ps_peers_light_sleep != !light_sleep_cnt ||
+           !ifmsh->ps_peers_deep_sleep != !deep_sleep_cnt)
+               changed = BSS_CHANGED_BEACON;
+
+       ifmsh->nonpeer_pm = nonpeer_pm;
+       ifmsh->ps_peers_light_sleep = light_sleep_cnt;
+       ifmsh->ps_peers_deep_sleep = deep_sleep_cnt;
+
+       return changed;
+}
+
+/**
+ * ieee80211_mps_set_sta_local_pm - set local PM towards a mesh STA
+ *
+ * @sta: mesh STA
+ * @pm: the power mode to set
+ * Return BSS_CHANGED_BEACON if a beacon update is in order.
+ */
+u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
+                                  enum nl80211_mesh_power_mode pm)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+       mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
+               pm, sta->sta.addr);
+
+       sta->local_pm = pm;
+
+       /*
+        * announce peer-specific power mode transition
+        * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
+        */
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               mps_qos_null_tx(sta);
+
+       return ieee80211_mps_local_status_update(sdata);
+}
+
+/**
+ * ieee80211_mps_set_frame_flags - set mesh PS flags in FC (and QoS Control)
+ *
+ * @sdata: local mesh subif
+ * @sta: mesh STA
+ * @hdr: 802.11 frame header
+ *
+ * see IEEE802.11-2012 8.2.4.1.7 and 8.2.4.5.11
+ *
+ * NOTE: sta must be given when an individually-addressed QoS frame header
+ * is handled, for group-addressed and management frames it is not used
+ */
+void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
+                                  struct sta_info *sta,
+                                  struct ieee80211_hdr *hdr)
+{
+       enum nl80211_mesh_power_mode pm;
+       u8 *qc;
+
+       if (WARN_ON(is_unicast_ether_addr(hdr->addr1) &&
+                   ieee80211_is_data_qos(hdr->frame_control) &&
+                   !sta))
+               return;
+
+       if (is_unicast_ether_addr(hdr->addr1) &&
+           ieee80211_is_data_qos(hdr->frame_control) &&
+           sta->plink_state == NL80211_PLINK_ESTAB)
+               pm = sta->local_pm;
+       else
+               pm = sdata->u.mesh.nonpeer_pm;
+
+       if (pm == NL80211_MESH_POWER_ACTIVE)
+               hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_PM);
+       else
+               hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       qc = ieee80211_get_qos_ctl(hdr);
+
+       if ((is_unicast_ether_addr(hdr->addr1) &&
+            pm == NL80211_MESH_POWER_DEEP_SLEEP) ||
+           (is_multicast_ether_addr(hdr->addr1) &&
+            sdata->u.mesh.ps_peers_deep_sleep > 0))
+               qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
+       else
+               qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
+}
+
+/**
+ * ieee80211_mps_sta_status_update - update buffering status of neighbor STA
+ *
+ * @sta: mesh STA
+ *
+ * called after change of peering status or non-peer/peer-specific power mode
+ */
+void ieee80211_mps_sta_status_update(struct sta_info *sta)
+{
+       enum nl80211_mesh_power_mode pm;
+       bool do_buffer;
+
+       /*
+        * use peer-specific power mode if peering is established and the
+        * peer's power mode is known
+        */
+       if (sta->plink_state == NL80211_PLINK_ESTAB &&
+           sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+               pm = sta->peer_pm;
+       else
+               pm = sta->nonpeer_pm;
+
+       do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
+
+       /* Don't let the same PS state be set twice */
+       if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer)
+               return;
+
+       if (do_buffer) {
+               set_sta_flag(sta, WLAN_STA_PS_STA);
+               atomic_inc(&sta->sdata->u.mesh.ps.num_sta_ps);
+               mps_dbg(sta->sdata, "start PS buffering frames towards %pM\n",
+                       sta->sta.addr);
+       } else {
+               ieee80211_sta_ps_deliver_wakeup(sta);
+       }
+
+       /* clear the MPSP flags for non-peers or active STA */
+       if (sta->plink_state != NL80211_PLINK_ESTAB) {
+               clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+               clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+       } else if (!do_buffer) {
+               clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+       }
+}
+
+static void mps_set_sta_peer_pm(struct sta_info *sta,
+                               struct ieee80211_hdr *hdr)
+{
+       enum nl80211_mesh_power_mode pm;
+       u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+       /*
+        * Test Power Management field of frame control (PW) and
+        * mesh power save level subfield of QoS control field (PSL)
+        *
+        * | PM | PSL| Mesh PM |
+        * +----+----+---------+
+        * | 0  |Rsrv|  Active |
+        * | 1  | 0  |  Light  |
+        * | 1  | 1  |  Deep   |
+        */
+       if (ieee80211_has_pm(hdr->frame_control)) {
+               if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8))
+                       pm = NL80211_MESH_POWER_DEEP_SLEEP;
+               else
+                       pm = NL80211_MESH_POWER_LIGHT_SLEEP;
+       } else {
+               pm = NL80211_MESH_POWER_ACTIVE;
+       }
+
+       if (sta->peer_pm == pm)
+               return;
+
+       mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
+               sta->sta.addr, pm);
+
+       sta->peer_pm = pm;
+
+       ieee80211_mps_sta_status_update(sta);
+}
+
+static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
+                                  struct ieee80211_hdr *hdr)
+{
+       enum nl80211_mesh_power_mode pm;
+
+       if (ieee80211_has_pm(hdr->frame_control))
+               pm = NL80211_MESH_POWER_DEEP_SLEEP;
+       else
+               pm = NL80211_MESH_POWER_ACTIVE;
+
+       if (sta->nonpeer_pm == pm)
+               return;
+
+       mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
+               sta->sta.addr, pm);
+
+       sta->nonpeer_pm = pm;
+
+       ieee80211_mps_sta_status_update(sta);
+}
+
+/**
+ * ieee80211_mps_rx_h_sta_process - frame receive handler for mesh powersave
+ *
+ * @sta: STA info that transmitted the frame
+ * @hdr: IEEE 802.11 (QoS) Header
+ */
+void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
+                                   struct ieee80211_hdr *hdr)
+{
+       if (is_unicast_ether_addr(hdr->addr1) &&
+           ieee80211_is_data_qos(hdr->frame_control)) {
+               /*
+                * individually addressed QoS Data/Null frames contain
+                * peer link-specific PS mode towards the local STA
+                */
+               mps_set_sta_peer_pm(sta, hdr);
+
+               /* check for mesh Peer Service Period trigger frames */
+               ieee80211_mpsp_trigger_process(ieee80211_get_qos_ctl(hdr),
+                                              sta, false, false);
+       } else {
+               /*
+                * can only determine non-peer PS mode
+                * (see IEEE802.11-2012 8.2.4.1.7)
+                */
+               mps_set_sta_nonpeer_pm(sta, hdr);
+       }
+}
+
+
+/* mesh PS frame release */
+
+static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct sk_buff *skb;
+       struct ieee80211_hdr *nullfunc;
+       struct ieee80211_tx_info *info;
+       u8 *qc;
+
+       skb = mps_qos_null_get(sta);
+       if (!skb)
+               return;
+
+       nullfunc = (struct ieee80211_hdr *) skb->data;
+       if (!eosp)
+               nullfunc->frame_control |=
+                               cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+       /*
+        * | RSPI | EOSP |  MPSP triggering   |
+        * +------+------+--------------------+
+        * |  0   |  0   | local STA is owner |
+        * |  0   |  1   | no MPSP (MPSP end) |
+        * |  1   |  0   | both STA are owner |
+        * |  1   |  1   | peer STA is owner  | see IEEE802.11-2012 13.14.9.2
+        */
+       qc = ieee80211_get_qos_ctl(nullfunc);
+       if (rspi)
+               qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8);
+       if (eosp)
+               qc[0] |= IEEE80211_QOS_CTL_EOSP;
+
+       info = IEEE80211_SKB_CB(skb);
+
+       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
+                      IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+       mps_dbg(sdata, "sending MPSP trigger%s%s to %pM\n",
+               rspi ? " RSPI" : "", eosp ? " EOSP" : "", sta->sta.addr);
+
+       ieee80211_tx_skb(sdata, skb);
+}
+
+/**
+ * mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed
+ *
+ * To properly end a mesh MPSP the last transmitted frame has to set the EOSP
+ * flag in the QoS Control field. In case the current trailing frame is not a
+ * QoS Data frame, append a QoS Null to carry the flag.
+ */
+static void mpsp_qos_null_append(struct sta_info *sta,
+                                struct sk_buff_head *frames)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct sk_buff *new_skb, *skb = skb_peek_tail(frames);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *info;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       new_skb = mps_qos_null_get(sta);
+       if (!new_skb)
+               return;
+
+       mps_dbg(sdata, "appending QoS Null in MPSP towards %pM\n",
+               sta->sta.addr);
+       /*
+        * This frame has to be transmitted last. Assign lowest priority to
+        * make sure it cannot pass other frames when releasing multiple ACs.
+        */
+       new_skb->priority = 1;
+       skb_set_queue_mapping(new_skb, IEEE80211_AC_BK);
+       ieee80211_set_qos_hdr(sdata, new_skb);
+
+       info = IEEE80211_SKB_CB(new_skb);
+       info->control.vif = &sdata->vif;
+       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
+       __skb_queue_tail(frames, new_skb);
+}
+
+/**
+ * mps_frame_deliver - transmit frames during mesh powersave
+ *
+ * @sta: STA info to transmit to
+ * @n_frames: number of frames to transmit. -1 for all
+ */
+static void mps_frame_deliver(struct sta_info *sta, int n_frames)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+       int ac;
+       struct sk_buff_head frames;
+       struct sk_buff *skb;
+       bool more_data = false;
+
+       skb_queue_head_init(&frames);
+
+       /* collect frame(s) from buffers */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               while (n_frames != 0) {
+                       skb = skb_dequeue(&sta->tx_filtered[ac]);
+                       if (!skb) {
+                               skb = skb_dequeue(
+                                       &sta->ps_tx_buf[ac]);
+                               if (skb)
+                                       local->total_ps_buffered--;
+                       }
+                       if (!skb)
+                               break;
+                       n_frames--;
+                       __skb_queue_tail(&frames, skb);
+               }
+
+               if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
+                   !skb_queue_empty(&sta->ps_tx_buf[ac]))
+                       more_data = true;
+       }
+
+       /* nothing to send? -> EOSP */
+       if (skb_queue_empty(&frames)) {
+               mpsp_trigger_send(sta, false, true);
+               return;
+       }
+
+       /* in a MPSP make sure the last skb is a QoS Data frame */
+       if (test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+               mpsp_qos_null_append(sta, &frames);
+
+       mps_dbg(sta->sdata, "sending %d frames to PS STA %pM\n",
+               skb_queue_len(&frames), sta->sta.addr);
+
+       /* prepare collected frames for transmission */
+       skb_queue_walk(&frames, skb) {
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+               struct ieee80211_hdr *hdr = (void *) skb->data;
+
+               /*
+                * Tell TX path to send this frame even though the
+                * STA may still remain in PS mode after this frame
+                * exchange.
+                */
+               info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+
+               if (more_data || !skb_queue_is_last(&frames, skb))
+                       hdr->frame_control |=
+                               cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+               else
+                       hdr->frame_control &=
+                               cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
+
+               if (skb_queue_is_last(&frames, skb) &&
+                   ieee80211_is_data_qos(hdr->frame_control)) {
+                       u8 *qoshdr = ieee80211_get_qos_ctl(hdr);
+
+                       /* MPSP trigger frame ends service period */
+                       *qoshdr |= IEEE80211_QOS_CTL_EOSP;
+                       info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+               }
+       }
+
+       ieee80211_add_pending_skbs(local, &frames);
+       sta_info_recalc_tim(sta);
+}
+
+/**
+ * ieee80211_mpsp_trigger_process - track status of mesh Peer Service Periods
+ *
+ * @qc: QoS Control field
+ * @sta: peer to start a MPSP with
+ * @tx: frame was transmitted by the local STA
+ * @acked: frame has been transmitted successfully
+ *
+ * NOTE: active mode STA may only serve as MPSP owner
+ */
+void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
+                                   bool tx, bool acked)
+{
+       u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8);
+       u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP;
+
+       if (tx) {
+               if (rspi && acked)
+                       set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+
+               if (eosp)
+                       clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+               else if (acked &&
+                        test_sta_flag(sta, WLAN_STA_PS_STA) &&
+                        !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+                       mps_frame_deliver(sta, -1);
+       } else {
+               if (eosp)
+                       clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+               else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+                       set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+
+               if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+                       mps_frame_deliver(sta, -1);
+       }
+}
+
+/**
+ * ieee80211_mps_frame_release - release buffered frames in response to beacon
+ *
+ * @sta: mesh STA
+ * @elems: beacon IEs
+ *
+ * For peers if we have individually-addressed frames buffered or the peer
+ * indicates buffered frames, send a corresponding MPSP trigger frame. Since
+ * we do not evaluate the awake window duration, QoS Nulls are used as MPSP
+ * trigger frames. If the neighbour STA is not a peer, only send single frames.
+ */
+void ieee80211_mps_frame_release(struct sta_info *sta,
+                                struct ieee802_11_elems *elems)
+{
+       int ac, buffer_local = 0;
+       bool has_buffered = false;
+
+       /* TIM map only for LLID <= IEEE80211_MAX_AID */
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
+                               le16_to_cpu(sta->llid) % IEEE80211_MAX_AID);
+
+       if (has_buffered)
+               mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
+                       sta->sta.addr);
+
+       /* only transmit to PS STA with announced, non-zero awake window */
+       if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+           (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+               return;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+               buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) +
+                               skb_queue_len(&sta->tx_filtered[ac]);
+
+       if (!has_buffered && !buffer_local)
+               return;
+
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               mpsp_trigger_send(sta, has_buffered, !buffer_local);
+       else
+               mps_frame_deliver(sta, 1);
+}
index e930175..9f6464f 100644 (file)
 #include "rate.h"
 #include "led.h"
 
-#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
-#define IEEE80211_AUTH_MAX_TRIES 3
-#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
-#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
-#define IEEE80211_ASSOC_MAX_TRIES 3
+#define IEEE80211_AUTH_TIMEOUT         (HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_SHORT   (HZ / 10)
+#define IEEE80211_AUTH_MAX_TRIES       3
+#define IEEE80211_AUTH_WAIT_ASSOC      (HZ * 5)
+#define IEEE80211_ASSOC_TIMEOUT                (HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_SHORT  (HZ / 10)
+#define IEEE80211_ASSOC_MAX_TRIES      3
 
 static int max_nullfunc_tries = 2;
 module_param(max_nullfunc_tries, int, 0644);
@@ -112,6 +114,9 @@ enum rx_mgmt_action {
 
        /* caller must call cfg80211_send_assoc_timeout() */
        RX_MGMT_CFG80211_ASSOC_TIMEOUT,
+
+       /* used when a processed beacon causes a deauth */
+       RX_MGMT_CFG80211_TX_DEAUTH,
 };
 
 /* utils */
@@ -172,79 +177,331 @@ static int ecw2cw(int ecw)
        return (1 << ecw) - 1;
 }
 
-static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
-                                 struct ieee80211_ht_operation *ht_oper,
-                                 const u8 *bssid, bool reconfig)
+static u32 chandef_downgrade(struct cfg80211_chan_def *c)
+{
+       u32 ret;
+       int tmp;
+
+       switch (c->width) {
+       case NL80211_CHAN_WIDTH_20:
+               c->width = NL80211_CHAN_WIDTH_20_NOHT;
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               c->width = NL80211_CHAN_WIDTH_20;
+               c->center_freq1 = c->chan->center_freq;
+               ret = IEEE80211_STA_DISABLE_40MHZ |
+                     IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
+               /* n_P40 */
+               tmp /= 2;
+               /* freq_P40 */
+               c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
+               c->width = NL80211_CHAN_WIDTH_40;
+               ret = IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+               c->center_freq2 = 0;
+               c->width = NL80211_CHAN_WIDTH_80;
+               ret = IEEE80211_STA_DISABLE_80P80MHZ |
+                     IEEE80211_STA_DISABLE_160MHZ;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               /* n_P20 */
+               tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
+               /* n_P80 */
+               tmp /= 4;
+               c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
+               c->width = NL80211_CHAN_WIDTH_80;
+               ret = IEEE80211_STA_DISABLE_80P80MHZ |
+                     IEEE80211_STA_DISABLE_160MHZ;
+               break;
+       default:
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               WARN_ON_ONCE(1);
+               c->width = NL80211_CHAN_WIDTH_20_NOHT;
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               break;
+       }
+
+       WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+
+       return ret;
+}
+
+static u32
+ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+                            struct ieee80211_supported_band *sband,
+                            struct ieee80211_channel *channel,
+                            const struct ieee80211_ht_operation *ht_oper,
+                            const struct ieee80211_vht_operation *vht_oper,
+                            struct cfg80211_chan_def *chandef, bool verbose)
+{
+       struct cfg80211_chan_def vht_chandef;
+       u32 ht_cfreq, ret;
+
+       chandef->chan = channel;
+       chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+       chandef->center_freq1 = channel->center_freq;
+       chandef->center_freq2 = 0;
+
+       if (!ht_oper || !sband->ht_cap.ht_supported) {
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       chandef->width = NL80211_CHAN_WIDTH_20;
+
+       ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
+                                                 channel->band);
+       /* check that channel matches the right operating channel */
+       if (channel->center_freq != ht_cfreq) {
+               /*
+                * It's possible that some APs are confused here;
+                * Netgear WNDR3700 sometimes reports 4 higher than
+                * the actual channel in association responses, but
+                * since we look at probe response/beacon data here
+                * it should be OK.
+                */
+               if (verbose)
+                       sdata_info(sdata,
+                                  "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+                                  channel->center_freq, ht_cfreq,
+                                  ht_oper->primary_chan, channel->band);
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       /* check 40 MHz support, if we have it */
+       if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+               switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                       chandef->width = NL80211_CHAN_WIDTH_40;
+                       chandef->center_freq1 += 10;
+                       break;
+               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                       chandef->width = NL80211_CHAN_WIDTH_40;
+                       chandef->center_freq1 -= 10;
+                       break;
+               }
+       } else {
+               /* 40 MHz (and 80 MHz) must be supported for VHT */
+               ret = IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       if (!vht_oper || !sband->vht_cap.vht_supported) {
+               ret = IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       vht_chandef.chan = channel;
+       vht_chandef.center_freq1 =
+               ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
+                                              channel->band);
+       vht_chandef.center_freq2 = 0;
+
+       if (vht_oper->center_freq_seg2_idx)
+               vht_chandef.center_freq2 =
+                       ieee80211_channel_to_frequency(
+                               vht_oper->center_freq_seg2_idx,
+                               channel->band);
+
+       switch (vht_oper->chan_width) {
+       case IEEE80211_VHT_CHANWIDTH_USE_HT:
+               vht_chandef.width = chandef->width;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_80MHZ:
+               vht_chandef.width = NL80211_CHAN_WIDTH_80;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_160MHZ:
+               vht_chandef.width = NL80211_CHAN_WIDTH_160;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+               vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
+               break;
+       default:
+               if (verbose)
+                       sdata_info(sdata,
+                                  "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
+                                  vht_oper->chan_width);
+               ret = IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       if (!cfg80211_chandef_valid(&vht_chandef)) {
+               if (verbose)
+                       sdata_info(sdata,
+                                  "AP VHT information is invalid, disable VHT\n");
+               ret = IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
+               ret = 0;
+               goto out;
+       }
+
+       if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
+               if (verbose)
+                       sdata_info(sdata,
+                                  "AP VHT information doesn't match HT, disable VHT\n");
+               ret = IEEE80211_STA_DISABLE_VHT;
+               goto out;
+       }
+
+       *chandef = vht_chandef;
+
+       ret = 0;
+
+out:
+       /* don't print the message below for VHT mismatch if VHT is disabled */
+       if (ret & IEEE80211_STA_DISABLE_VHT)
+               vht_chandef = *chandef;
+
+       while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+                                       IEEE80211_CHAN_DISABLED)) {
+               if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
+                       ret = IEEE80211_STA_DISABLE_HT |
+                             IEEE80211_STA_DISABLE_VHT;
+                       goto out;
+               }
+
+               ret |= chandef_downgrade(chandef);
+       }
+
+       if (chandef->width != vht_chandef.width && verbose)
+               sdata_info(sdata,
+                          "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
+
+       WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
+       return ret;
+}
+
+static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
+                              struct sta_info *sta,
+                              const struct ieee80211_ht_operation *ht_oper,
+                              const struct ieee80211_vht_operation *vht_oper,
+                              const u8 *bssid, u32 *changed)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_supported_band *sband;
-       struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
-       struct sta_info *sta;
-       u32 changed = 0;
+       struct cfg80211_chan_def chandef;
        u16 ht_opmode;
-       bool disable_40 = false;
+       u32 flags;
+       enum ieee80211_sta_rx_bandwidth new_sta_bw;
+       int ret;
 
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-       if (WARN_ON(!chanctx_conf)) {
-               rcu_read_unlock();
+       /* if HT was/is disabled, don't track any bandwidth changes */
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || !ht_oper)
                return 0;
-       }
-       chan = chanctx_conf->def.chan;
-       rcu_read_unlock();
+
+       /* don't check VHT if we associated as non-VHT station */
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
+               vht_oper = NULL;
+
+       if (WARN_ON_ONCE(!sta))
+               return -EINVAL;
+
+       chan = sdata->vif.bss_conf.chandef.chan;
        sband = local->hw.wiphy->bands[chan->band];
 
-       switch (sdata->vif.bss_conf.chandef.width) {
+       /* calculate new channel (type) based on HT/VHT operation IEs */
+       flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
+                                            vht_oper, &chandef, false);
+
+       /*
+        * Downgrade the new channel if we associated with restricted
+        * capabilities. For example, if we associated as a 20 MHz STA
+        * to a 40 MHz AP (due to regulatory, capabilities or config
+        * reasons) then switching to a 40 MHz channel now won't do us
+        * any good -- we couldn't use it with the AP.
+        */
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
+           chandef.width == NL80211_CHAN_WIDTH_80P80)
+               flags |= chandef_downgrade(&chandef);
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
+           chandef.width == NL80211_CHAN_WIDTH_160)
+               flags |= chandef_downgrade(&chandef);
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
+           chandef.width > NL80211_CHAN_WIDTH_20)
+               flags |= chandef_downgrade(&chandef);
+
+       if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef))
+               return 0;
+
+       sdata_info(sdata,
+                  "AP %pM changed bandwidth, new config is %d MHz, width %d (%d/%d MHz)\n",
+                  ifmgd->bssid, chandef.chan->center_freq, chandef.width,
+                  chandef.center_freq1, chandef.center_freq2);
+
+       if (flags != (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
+                                     IEEE80211_STA_DISABLE_VHT |
+                                     IEEE80211_STA_DISABLE_40MHZ |
+                                     IEEE80211_STA_DISABLE_80P80MHZ |
+                                     IEEE80211_STA_DISABLE_160MHZ)) ||
+           !cfg80211_chandef_valid(&chandef)) {
+               sdata_info(sdata,
+                          "AP %pM changed bandwidth in a way we can't support - disconnect\n",
+                          ifmgd->bssid);
+               return -EINVAL;
+       }
+
+       switch (chandef.width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+               new_sta_bw = IEEE80211_STA_RX_BW_20;
+               break;
        case NL80211_CHAN_WIDTH_40:
-               if (sdata->vif.bss_conf.chandef.chan->center_freq >
-                               sdata->vif.bss_conf.chandef.center_freq1 &&
-                   chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
-                       disable_40 = true;
-               if (sdata->vif.bss_conf.chandef.chan->center_freq <
-                               sdata->vif.bss_conf.chandef.center_freq1 &&
-                   chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
-                       disable_40 = true;
+               new_sta_bw = IEEE80211_STA_RX_BW_40;
                break;
-       default:
+       case NL80211_CHAN_WIDTH_80:
+               new_sta_bw = IEEE80211_STA_RX_BW_80;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+       case NL80211_CHAN_WIDTH_160:
+               new_sta_bw = IEEE80211_STA_RX_BW_160;
                break;
+       default:
+               return -EINVAL;
        }
 
-       /* This can change during the lifetime of the BSS */
-       if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
-               disable_40 = true;
-
-       mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
-
-       WARN_ON_ONCE(!sta);
+       if (new_sta_bw > sta->cur_max_bandwidth)
+               new_sta_bw = sta->cur_max_bandwidth;
 
-       if (sta && !sta->supports_40mhz)
-               disable_40 = true;
-
-       if (sta && (!reconfig ||
-                   (disable_40 != !(sta->sta.ht_cap.cap &
-                                       IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) {
+       if (new_sta_bw < sta->sta.bandwidth) {
+               sta->sta.bandwidth = new_sta_bw;
+               rate_control_rate_update(local, sband, sta,
+                                        IEEE80211_RC_BW_CHANGED);
+       }
 
-               if (disable_40)
-                       sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-               else
-                       sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed);
+       if (ret) {
+               sdata_info(sdata,
+                          "AP %pM changed bandwidth to incompatible one - disconnect\n",
+                          ifmgd->bssid);
+               return ret;
+       }
 
+       if (new_sta_bw > sta->sta.bandwidth) {
+               sta->sta.bandwidth = new_sta_bw;
                rate_control_rate_update(local, sband, sta,
                                         IEEE80211_RC_BW_CHANGED);
        }
-       mutex_unlock(&local->sta_mtx);
 
        ht_opmode = le16_to_cpu(ht_oper->operation_mode);
 
        /* if bss configuration changed store the new one */
-       if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) {
-               changed |= BSS_CHANGED_HT;
+       if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
+               *changed |= BSS_CHANGED_HT;
                sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
        }
 
-       return changed;
+       return 0;
 }
 
 /* frame sending functions */
@@ -644,6 +901,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        drv_mgd_prepare_tx(local, sdata);
 
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
+                                               IEEE80211_TX_INTFL_MLME_CONN_TX;
        ieee80211_tx_skb(sdata, skb);
 }
 
@@ -680,7 +940,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        if (powersave)
                nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
 
-       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+                                       IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
        if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
                            IEEE80211_STA_CONNECTION_POLL))
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
@@ -784,10 +1045,10 @@ static void ieee80211_chswitch_timer(unsigned long data)
        ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
 }
 
-void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
-                                     struct ieee80211_channel_sw_ie *sw_elem,
-                                     struct ieee80211_bss *bss,
-                                     u64 timestamp)
+void
+ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+                                const struct ieee80211_channel_sw_ie *sw_elem,
+                                struct ieee80211_bss *bss, u64 timestamp)
 {
        struct cfg80211_bss *cbss =
                container_of((void *)bss, struct cfg80211_bss, priv);
@@ -946,39 +1207,6 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
-void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
-{
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_conf *conf = &local->hw.conf;
-
-       WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
-               !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
-               (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
-
-       local->disable_dynamic_ps = false;
-       conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout;
-}
-EXPORT_SYMBOL(ieee80211_enable_dyn_ps);
-
-void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif)
-{
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_conf *conf = &local->hw.conf;
-
-       WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
-               !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
-               (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
-
-       local->disable_dynamic_ps = true;
-       conf->dynamic_ps_timeout = 0;
-       del_timer_sync(&local->dynamic_ps_timer);
-       ieee80211_queue_work(&local->hw,
-                            &local->dynamic_ps_enable_work);
-}
-EXPORT_SYMBOL(ieee80211_disable_dyn_ps);
-
 /* powersave */
 static void ieee80211_enable_ps(struct ieee80211_local *local,
                                struct ieee80211_sub_if_data *sdata)
@@ -1081,7 +1309,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
        }
 
        if (count == 1 && ieee80211_powersave_allowed(found)) {
-               struct ieee80211_conf *conf = &local->hw.conf;
                s32 beaconint_us;
 
                if (latency < 0)
@@ -1105,10 +1332,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
                        else
                                timeout = 100;
                }
-               local->dynamic_ps_user_timeout = timeout;
-               if (!local->disable_dynamic_ps)
-                       conf->dynamic_ps_timeout =
-                               local->dynamic_ps_user_timeout;
+               local->hw.conf.dynamic_ps_timeout = timeout;
 
                if (beaconint_us > latency) {
                        local->ps_sdata = NULL;
@@ -1178,8 +1402,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
        if (local->hw.conf.flags & IEEE80211_CONF_PS)
                return;
 
-       if (!local->disable_dynamic_ps &&
-           local->hw.conf.dynamic_ps_timeout > 0) {
+       if (local->hw.conf.dynamic_ps_timeout > 0) {
                /* don't enter PS if TX frames are pending */
                if (drv_tx_frames_pending(local)) {
                        mod_timer(&local->dynamic_ps_timer, jiffies +
@@ -1244,16 +1467,30 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
        ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
 }
 
+void ieee80211_dfs_cac_timer_work(struct work_struct *work)
+{
+       struct delayed_work *delayed_work =
+               container_of(work, struct delayed_work, work);
+       struct ieee80211_sub_if_data *sdata =
+               container_of(delayed_work, struct ieee80211_sub_if_data,
+                            dfs_cac_timer_work);
+
+       ieee80211_vif_release_channel(sdata);
+
+       cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+}
+
 /* MLME */
 static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                                     struct ieee80211_sub_if_data *sdata,
-                                    u8 *wmm_param, size_t wmm_param_len)
+                                    const u8 *wmm_param, size_t wmm_param_len)
 {
        struct ieee80211_tx_queue_params params;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        size_t left;
        int count;
-       u8 *pos, uapsd_queues = 0;
+       const u8 *pos;
+       u8 uapsd_queues = 0;
 
        if (!local->ops->conf_tx)
                return false;
@@ -1445,7 +1682,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_led_assoc(local, 1);
 
-       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+       if (sdata->u.mgd.assoc_data->have_beacon) {
                /*
                 * If the AP is buggy we may get here with no DTIM period
                 * known, so assume it's 1 which is the only safe assumption
@@ -1453,6 +1690,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
                 * probably just won't work at all.
                 */
                bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
+               bss_info_changed |= BSS_CHANGED_DTIM_PERIOD;
        } else {
                bss_conf->dtim_period = 0;
        }
@@ -1465,10 +1703,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
                bss_info_changed |= BSS_CHANGED_CQM;
 
        /* Enable ARP filtering */
-       if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) {
-               bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+       if (bss_conf->arp_addr_cnt)
                bss_info_changed |= BSS_CHANGED_ARP_FILTER;
-       }
 
        ieee80211_bss_info_change_notify(sdata, bss_info_changed);
 
@@ -1489,7 +1725,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
        u32 changed = 0;
 
        ASSERT_MGD_MTX(ifmgd);
@@ -1521,14 +1756,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_tx_stop_all_queues(sdata->dev);
        netif_carrier_off(sdata->dev);
 
-       mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, ifmgd->bssid);
-       if (sta) {
-               set_sta_flag(sta, WLAN_STA_BLOCK_BA);
-               ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
-       }
-       mutex_unlock(&local->sta_mtx);
-
        /*
         * if we want to get out of ps before disassoc (why?) we have
         * to do it before sending disassoc, as otherwise the null-packet
@@ -1582,10 +1809,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        cancel_work_sync(&local->dynamic_ps_enable_work);
 
        /* Disable ARP filtering */
-       if (sdata->vif.bss_conf.arp_filter_enabled) {
-               sdata->vif.bss_conf.arp_filter_enabled = false;
+       if (sdata->vif.bss_conf.arp_addr_cnt)
                changed |= BSS_CHANGED_ARP_FILTER;
-       }
 
        sdata->vif.bss_conf.qos = false;
        changed |= BSS_CHANGED_QOS;
@@ -1668,17 +1893,18 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
        if (!ieee80211_is_data(hdr->frame_control))
            return;
 
-       if (ack)
-               ieee80211_sta_reset_conn_monitor(sdata);
-
        if (ieee80211_is_nullfunc(hdr->frame_control) &&
            sdata->u.mgd.probe_send_count > 0) {
                if (ack)
-                       sdata->u.mgd.probe_send_count = 0;
+                       ieee80211_sta_reset_conn_monitor(sdata);
                else
                        sdata->u.mgd.nullfunc_failed = true;
                ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+               return;
        }
+
+       if (ack)
+               ieee80211_sta_reset_conn_monitor(sdata);
 }
 
 static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
@@ -1719,7 +1945,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                        ssid_len = ssid[1];
 
                ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
-                                        0, (u32) -1, true, false,
+                                        0, (u32) -1, true, 0,
                                         ifmgd->associated->channel, false);
                rcu_read_unlock();
        }
@@ -1753,7 +1979,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
 
        if (beacon)
                mlme_dbg_ratelimited(sdata,
-                                    "detected beacon loss from AP - sending probe request\n");
+                                    "detected beacon loss from AP - probing\n");
 
        ieee80211_cqm_rssi_notify(&sdata->vif,
                NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
@@ -1834,11 +2060,9 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_ap_probereq_get);
 
-static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
-                                  bool transmit_frame)
+static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct ieee80211_local *local = sdata->local;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
@@ -1849,8 +2073,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              transmit_frame, frame_buf);
+                              true, frame_buf);
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+       ieee80211_wake_queues_by_reason(&sdata->local->hw,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
        mutex_unlock(&ifmgd->mtx);
 
        /*
@@ -1858,10 +2084,6 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
         * but that's not a problem.
         */
        cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
-
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
 }
 
 static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1880,10 +2102,10 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
                rcu_read_unlock();
        }
 
-       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) {
+       if (ifmgd->connection_loss) {
                sdata_info(sdata, "Connection to AP %pM lost\n",
                           ifmgd->bssid);
-               __ieee80211_disconnect(sdata, false);
+               __ieee80211_disconnect(sdata);
        } else {
                ieee80211_mgd_probe_ap(sdata, true);
        }
@@ -1895,9 +2117,7 @@ static void ieee80211_csa_connection_drop_work(struct work_struct *work)
                container_of(work, struct ieee80211_sub_if_data,
                             u.mgd.csa_connection_drop_work);
 
-       ieee80211_wake_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-       __ieee80211_disconnect(sdata, true);
+       __ieee80211_disconnect(sdata);
 }
 
 void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -1908,6 +2128,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
        trace_api_beacon_loss(sdata);
 
        WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
+       sdata->u.mgd.connection_loss = false;
        ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
 }
 EXPORT_SYMBOL(ieee80211_beacon_loss);
@@ -1919,7 +2140,7 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
 
        trace_api_connection_loss(sdata);
 
-       WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR));
+       sdata->u.mgd.connection_loss = true;
        ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
 }
 EXPORT_SYMBOL(ieee80211_connection_loss);
@@ -1941,7 +2162,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
                ieee80211_vif_release_channel(sdata);
        }
 
-       cfg80211_put_bss(auth_data->bss);
+       cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
        kfree(auth_data);
        sdata->u.mgd.auth_data = NULL;
 }
@@ -1949,9 +2170,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
 static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_mgmt *mgmt, size_t len)
 {
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
        u8 *pos;
        struct ieee802_11_elems elems;
+       u32 tx_flags = 0;
 
        pos = mgmt->u.auth.variable;
        ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -1959,11 +2182,14 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
                return;
        auth_data->expected_transaction = 4;
        drv_mgd_prepare_tx(sdata->local, sdata);
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+                          IEEE80211_TX_INTFL_MLME_CONN_TX;
        ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
                            elems.challenge - 2, elems.challenge_len + 2,
                            auth_data->bss->bssid, auth_data->bss->bssid,
                            auth_data->key, auth_data->key_len,
-                           auth_data->key_idx);
+                           auth_data->key_idx, tx_flags);
 }
 
 static enum rx_mgmt_action __must_check
@@ -2030,6 +2256,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
        sdata_info(sdata, "authenticated\n");
        ifmgd->auth_data->done = true;
        ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
+       ifmgd->auth_data->timeout_started = true;
        run_again(ifmgd, ifmgd->auth_data->timeout);
 
        if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
@@ -2088,10 +2315,6 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-       mutex_lock(&sdata->local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&sdata->local->mtx);
-
        return RX_MGMT_CFG80211_DEAUTH;
 }
 
@@ -2119,10 +2342,6 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-       mutex_lock(&sdata->local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&sdata->local->mtx);
-
        return RX_MGMT_CFG80211_DISASSOC;
 }
 
@@ -2232,6 +2451,24 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 
        ifmgd->aid = aid;
 
+       /*
+        * We previously checked these in the beacon/probe response, so
+        * they should be present here. This is just a safety net.
+        */
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
+           (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
+               sdata_info(sdata,
+                          "HT AP is missing WMM params or HT capability/operation in AssocResp\n");
+               return false;
+       }
+
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
+           (!elems.vht_cap_elem || !elems.vht_operation)) {
+               sdata_info(sdata,
+                          "VHT AP is missing VHT capability/operation in AssocResp\n");
+               return false;
+       }
+
        mutex_lock(&sdata->local->sta_mtx);
        /*
         * station info was already allocated and inserted before
@@ -2245,17 +2482,36 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 
        sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
 
+       /* Set up internal HT/VHT capabilities */
        if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-                               elems.ht_cap_elem, &sta->sta.ht_cap);
-
-       sta->supports_40mhz =
-               sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                                                 elems.ht_cap_elem, sta);
 
        if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-                                                   elems.vht_cap_elem,
-                                                   &sta->sta.vht_cap);
+                                                   elems.vht_cap_elem, sta);
+
+       /*
+        * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
+        * in their association response, so ignore that data for our own
+        * configuration. If it changed since the last beacon, we'll get the
+        * next beacon and update then.
+        */
+
+       /*
+        * If an operating mode notification IE is present, override the
+        * NSS calculation (that would be done in rate_control_rate_init())
+        * and use the # of streams from that element.
+        */
+       if (elems.opmode_notif &&
+           !(*elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
+               u8 nss;
+
+               nss = *elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
+               nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
+               nss += 1;
+               sta->sta.rx_nss = nss;
+       }
 
        rate_control_rate_init(sta);
 
@@ -2265,9 +2521,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        if (elems.wmm_param)
                set_sta_flag(sta, WLAN_STA_WME);
 
-       err = sta_info_move_state(sta, IEEE80211_STA_AUTH);
-       if (!err)
-               err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+       err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
        if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
                err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
        if (err) {
@@ -2296,11 +2550,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                ieee80211_set_wmm_default(sdata, false);
        changed |= BSS_CHANGED_QOS;
 
-       if (elems.ht_operation && elems.wmm_param &&
-           !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
-               changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
-                                                 cbss->bssid, false);
-
        /* set AID and assoc capability,
         * ieee80211_set_associated() will tell the driver */
        bss_conf->aid = aid;
@@ -2374,6 +2623,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                           "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
                           mgmt->sa, tu, ms);
                assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
+               assoc_data->timeout_started = true;
                if (ms > IEEE80211_ASSOC_TIMEOUT)
                        run_again(ifmgd, assoc_data->timeout);
                return RX_MGMT_NONE;
@@ -2389,7 +2639,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
                        /* oops -- internal error -- send timeout for now */
                        ieee80211_destroy_assoc_data(sdata, false);
-                       cfg80211_put_bss(*bss);
+                       cfg80211_put_bss(sdata->local->hw.wiphy, *bss);
                        return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
                }
                sdata_info(sdata, "associated\n");
@@ -2425,7 +2675,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
 
                if (elems->tim && !elems->parse_error) {
-                       struct ieee80211_tim_ie *tim_ie = elems->tim;
+                       const struct ieee80211_tim_ie *tim_ie = elems->tim;
                        sdata->u.mgd.dtim_period = tim_ie->dtim_period;
                }
        }
@@ -2497,6 +2747,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
                sdata_info(sdata, "direct probe responded\n");
                ifmgd->auth_data->tries = 0;
                ifmgd->auth_data->timeout = jiffies;
+               ifmgd->auth_data->timeout_started = true;
                run_again(ifmgd, ifmgd->auth_data->timeout);
        }
 }
@@ -2522,10 +2773,10 @@ static const u64 care_about_ies =
        (1ULL << WLAN_EID_HT_CAPABILITY) |
        (1ULL << WLAN_EID_HT_OPERATION);
 
-static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
-                                    struct ieee80211_mgmt *mgmt,
-                                    size_t len,
-                                    struct ieee80211_rx_status *rx_status)
+static enum rx_mgmt_action
+ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
+                        struct ieee80211_mgmt *mgmt, size_t len,
+                        u8 *deauth_buf, struct ieee80211_rx_status *rx_status)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
@@ -2534,6 +2785,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
+       struct sta_info *sta;
        u32 changed = 0;
        bool erp_valid;
        u8 erp_value = 0;
@@ -2545,39 +2797,51 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        /* Process beacon from the current BSS */
        baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
        if (baselen > len)
-               return;
+               return RX_MGMT_NONE;
 
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        if (!chanctx_conf) {
                rcu_read_unlock();
-               return;
+               return RX_MGMT_NONE;
        }
 
        if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
                rcu_read_unlock();
-               return;
+               return RX_MGMT_NONE;
        }
        chan = chanctx_conf->def.chan;
        rcu_read_unlock();
 
-       if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
+       if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
            ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
                ieee802_11_parse_elems(mgmt->u.beacon.variable,
                                       len - baselen, &elems);
 
                ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
                ifmgd->assoc_data->have_beacon = true;
-               ifmgd->assoc_data->sent_assoc = false;
+               ifmgd->assoc_data->need_beacon = false;
+               if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+                       sdata->vif.bss_conf.sync_tsf =
+                               le64_to_cpu(mgmt->u.beacon.timestamp);
+                       sdata->vif.bss_conf.sync_device_ts =
+                               rx_status->device_timestamp;
+                       if (elems.tim)
+                               sdata->vif.bss_conf.sync_dtim_count =
+                                       elems.tim->dtim_count;
+                       else
+                               sdata->vif.bss_conf.sync_dtim_count = 0;
+               }
                /* continue assoc process */
                ifmgd->assoc_data->timeout = jiffies;
+               ifmgd->assoc_data->timeout_started = true;
                run_again(ifmgd, ifmgd->assoc_data->timeout);
-               return;
+               return RX_MGMT_NONE;
        }
 
        if (!ifmgd->associated ||
            !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
-               return;
+               return RX_MGMT_NONE;
        bssid = ifmgd->associated->bssid;
 
        /* Track average RSSI from the Beacon frames of the current AP */
@@ -2608,12 +2872,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                if (sig > ifmgd->rssi_max_thold &&
                    (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
                        ifmgd->last_ave_beacon_signal = sig;
-                       drv_rssi_callback(local, RSSI_EVENT_HIGH);
+                       drv_rssi_callback(local, sdata, RSSI_EVENT_HIGH);
                } else if (sig < ifmgd->rssi_min_thold &&
                           (last_sig >= ifmgd->rssi_max_thold ||
                           last_sig == 0)) {
                        ifmgd->last_ave_beacon_signal = sig;
-                       drv_rssi_callback(local, RSSI_EVENT_LOW);
+                       drv_rssi_callback(local, sdata, RSSI_EVENT_LOW);
                }
        }
 
@@ -2643,7 +2907,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 
        if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
                mlme_dbg_ratelimited(sdata,
-                                    "cancelling probereq poll due to a received beacon\n");
+                                    "cancelling AP probe due to a received beacon\n");
                mutex_lock(&local->mtx);
                ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
                ieee80211_run_deferred_scan(local);
@@ -2715,7 +2979,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        }
 
        if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
-               return;
+               return RX_MGMT_NONE;
        ifmgd->beacon_crc = ncrc;
        ifmgd->beacon_crc_valid = true;
 
@@ -2725,6 +2989,32 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                     elems.wmm_param_len))
                changed |= BSS_CHANGED_QOS;
 
+       /*
+        * If we haven't had a beacon before, tell the driver about the
+        * DTIM period (and beacon timing if desired) now.
+        */
+       if (!bss_conf->dtim_period) {
+               /* a few bogus AP send dtim_period = 0 or no TIM IE */
+               if (elems.tim)
+                       bss_conf->dtim_period = elems.tim->dtim_period ?: 1;
+               else
+                       bss_conf->dtim_period = 1;
+
+               if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+                       sdata->vif.bss_conf.sync_tsf =
+                               le64_to_cpu(mgmt->u.beacon.timestamp);
+                       sdata->vif.bss_conf.sync_device_ts =
+                               rx_status->device_timestamp;
+                       if (elems.tim)
+                               sdata->vif.bss_conf.sync_dtim_count =
+                                       elems.tim->dtim_count;
+                       else
+                               sdata->vif.bss_conf.sync_dtim_count = 0;
+               }
+
+               changed |= BSS_CHANGED_DTIM_PERIOD;
+       }
+
        if (elems.erp_info && elems.erp_info_len >= 1) {
                erp_valid = true;
                erp_value = elems.erp_info[0];
@@ -2735,11 +3025,22 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                        le16_to_cpu(mgmt->u.beacon.capab_info),
                        erp_valid, erp_value);
 
+       mutex_lock(&local->sta_mtx);
+       sta = sta_info_get(sdata, bssid);
+
+       if (ieee80211_config_bw(sdata, sta, elems.ht_operation,
+                               elems.vht_operation, bssid, &changed)) {
+               mutex_unlock(&local->sta_mtx);
+               ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+                                      WLAN_REASON_DEAUTH_LEAVING,
+                                      true, deauth_buf);
+               return RX_MGMT_CFG80211_TX_DEAUTH;
+       }
 
-       if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
-           !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
-               changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
-                                                 bssid, true);
+       if (sta && elems.opmode_notif)
+               ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif,
+                                           rx_status->band, true);
+       mutex_unlock(&local->sta_mtx);
 
        if (elems.country_elem && elems.pwr_constr_elem &&
            mgmt->u.probe_resp.capab_info &
@@ -2750,6 +3051,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                                       elems.pwr_constr_elem);
 
        ieee80211_bss_info_change_notify(sdata, changed);
+
+       return RX_MGMT_NONE;
 }
 
 void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -2760,6 +3063,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgmt *mgmt;
        struct cfg80211_bss *bss = NULL;
        enum rx_mgmt_action rma = RX_MGMT_NONE;
+       u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
        u16 fc;
 
        rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -2770,7 +3074,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 
        switch (fc & IEEE80211_FCTL_STYPE) {
        case IEEE80211_STYPE_BEACON:
-               ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
+               rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
+                                              deauth_buf, rx_status);
                break;
        case IEEE80211_STYPE_PROBE_RESP:
                ieee80211_rx_mgmt_probe_resp(sdata, skb);
@@ -2819,6 +3124,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
                cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
                break;
+       case RX_MGMT_CFG80211_TX_DEAUTH:
+               cfg80211_send_deauth(sdata->dev, deauth_buf,
+                                    sizeof(deauth_buf));
+               break;
        default:
                WARN(1, "unexpected: %d", rma);
        }
@@ -2840,14 +3149,13 @@ static void ieee80211_sta_timer(unsigned long data)
 }
 
 static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
-                                         u8 *bssid, u8 reason)
+                                         u8 *bssid, u8 reason, bool tx)
 {
-       struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
-                              false, frame_buf);
+                              tx, frame_buf);
        mutex_unlock(&ifmgd->mtx);
 
        /*
@@ -2856,10 +3164,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
         */
        cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
 
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
-
        mutex_lock(&ifmgd->mtx);
 }
 
@@ -2868,12 +3172,17 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
+       u32 tx_flags = 0;
 
        lockdep_assert_held(&ifmgd->mtx);
 
        if (WARN_ON_ONCE(!auth_data))
                return -EINVAL;
 
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+                          IEEE80211_TX_INTFL_MLME_CONN_TX;
+
        auth_data->tries++;
 
        if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -2910,7 +3219,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
                ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
                                    auth_data->data, auth_data->data_len,
                                    auth_data->bss->bssid,
-                                   auth_data->bss->bssid, NULL, 0, 0);
+                                   auth_data->bss->bssid, NULL, 0, 0,
+                                   tx_flags);
        } else {
                const u8 *ssidie;
 
@@ -2929,13 +3239,18 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
                 * will not answer to direct packet in unassociated state.
                 */
                ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
-                                        NULL, 0, (u32) -1, true, false,
+                                        NULL, 0, (u32) -1, true, tx_flags,
                                         auth_data->bss->channel, false);
                rcu_read_unlock();
        }
 
-       auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-       run_again(ifmgd, auth_data->timeout);
+       if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+               auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+               ifmgd->auth_data->timeout_started = true;
+               run_again(ifmgd, auth_data->timeout);
+       } else {
+               auth_data->timeout_started = false;
+       }
 
        return 0;
 }
@@ -2961,15 +3276,32 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
                return -ETIMEDOUT;
        }
 
-       sdata_info(sdata, "associate with %pM (try %d/%d)\n",
-                  assoc_data->bss->bssid, assoc_data->tries,
-                  IEEE80211_ASSOC_MAX_TRIES);
-       ieee80211_send_assoc(sdata);
+       sdata_info(sdata, "associate with %pM (try %d/%d)\n",
+                  assoc_data->bss->bssid, assoc_data->tries,
+                  IEEE80211_ASSOC_MAX_TRIES);
+       ieee80211_send_assoc(sdata);
+
+       if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+               assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
+               assoc_data->timeout_started = true;
+               run_again(&sdata->u.mgd, assoc_data->timeout);
+       } else {
+               assoc_data->timeout_started = false;
+       }
+
+       return 0;
+}
+
+void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
+                                 __le16 fc, bool acked)
+{
+       struct ieee80211_local *local = sdata->local;
 
-       assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
-       run_again(&sdata->u.mgd, assoc_data->timeout);
+       sdata->u.mgd.status_fc = fc;
+       sdata->u.mgd.status_acked = acked;
+       sdata->u.mgd.status_received = true;
 
-       return 0;
+       ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
@@ -2979,7 +3311,36 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&ifmgd->mtx);
 
-       if (ifmgd->auth_data &&
+       if (ifmgd->status_received) {
+               __le16 fc = ifmgd->status_fc;
+               bool status_acked = ifmgd->status_acked;
+
+               ifmgd->status_received = false;
+               if (ifmgd->auth_data &&
+                   (ieee80211_is_probe_req(fc) || ieee80211_is_auth(fc))) {
+                       if (status_acked) {
+                               ifmgd->auth_data->timeout =
+                                       jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
+                               run_again(ifmgd, ifmgd->auth_data->timeout);
+                       } else {
+                               ifmgd->auth_data->timeout = jiffies - 1;
+                       }
+                       ifmgd->auth_data->timeout_started = true;
+               } else if (ifmgd->assoc_data &&
+                          (ieee80211_is_assoc_req(fc) ||
+                           ieee80211_is_reassoc_req(fc))) {
+                       if (status_acked) {
+                               ifmgd->assoc_data->timeout =
+                                       jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT;
+                               run_again(ifmgd, ifmgd->assoc_data->timeout);
+                       } else {
+                               ifmgd->assoc_data->timeout = jiffies - 1;
+                       }
+                       ifmgd->assoc_data->timeout_started = true;
+               }
+       }
+
+       if (ifmgd->auth_data && ifmgd->auth_data->timeout_started &&
            time_after(jiffies, ifmgd->auth_data->timeout)) {
                if (ifmgd->auth_data->done) {
                        /*
@@ -2998,12 +3359,13 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                        cfg80211_send_auth_timeout(sdata->dev, bssid);
                        mutex_lock(&ifmgd->mtx);
                }
-       } else if (ifmgd->auth_data)
+       } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
                run_again(ifmgd, ifmgd->auth_data->timeout);
 
-       if (ifmgd->assoc_data &&
+       if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started &&
            time_after(jiffies, ifmgd->assoc_data->timeout)) {
-               if (!ifmgd->assoc_data->have_beacon ||
+               if ((ifmgd->assoc_data->need_beacon &&
+                    !ifmgd->assoc_data->have_beacon) ||
                    ieee80211_do_assoc(sdata)) {
                        u8 bssid[ETH_ALEN];
 
@@ -3015,7 +3377,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                        cfg80211_send_assoc_timeout(sdata->dev, bssid);
                        mutex_lock(&ifmgd->mtx);
                }
-       } else if (ifmgd->assoc_data)
+       } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
                run_again(ifmgd, ifmgd->assoc_data->timeout);
 
        if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
@@ -3046,7 +3408,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                         "No ack for nullfunc frame to AP %pM, disconnecting.\n",
                                         bssid);
                                ieee80211_sta_connection_lost(sdata, bssid,
-                                       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+                                       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+                                       false);
                        }
                } else if (time_is_after_jiffies(ifmgd->probe_timeout))
                        run_again(ifmgd, ifmgd->probe_timeout);
@@ -3055,7 +3418,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
                                 bssid, probe_wait_ms);
                        ieee80211_sta_connection_lost(sdata, bssid,
-                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
                } else if (ifmgd->probe_send_count < max_tries) {
                        mlme_dbg(sdata,
                                 "No probe response from AP %pM after %dms, try %d/%i\n",
@@ -3074,15 +3437,11 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                    bssid, probe_wait_ms);
 
                        ieee80211_sta_connection_lost(sdata, bssid,
-                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
                }
        }
 
        mutex_unlock(&ifmgd->mtx);
-
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
 }
 
 static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -3094,6 +3453,7 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
        if (local->quiescing)
                return;
 
+       sdata->u.mgd.connection_loss = false;
        ieee80211_queue_work(&sdata->local->hw,
                             &sdata->u.mgd.beacon_connection_loss_work);
 }
@@ -3169,23 +3529,23 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       if (!ifmgd->associated)
+       mutex_lock(&ifmgd->mtx);
+       if (!ifmgd->associated) {
+               mutex_unlock(&ifmgd->mtx);
                return;
+       }
 
        if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
                sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
-               mutex_lock(&ifmgd->mtx);
-               if (ifmgd->associated) {
-                       mlme_dbg(sdata,
-                                "driver requested disconnect after resume\n");
-                       ieee80211_sta_connection_lost(sdata,
-                               ifmgd->associated->bssid,
-                               WLAN_REASON_UNSPECIFIED);
-                       mutex_unlock(&ifmgd->mtx);
-                       return;
-               }
+               mlme_dbg(sdata, "driver requested disconnect after resume\n");
+               ieee80211_sta_connection_lost(sdata,
+                                             ifmgd->associated->bssid,
+                                             WLAN_REASON_UNSPECIFIED,
+                                             true);
                mutex_unlock(&ifmgd->mtx);
+               return;
        }
+       mutex_unlock(&ifmgd->mtx);
 
        if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
                add_timer(&ifmgd->timer);
@@ -3261,201 +3621,6 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
        return 0;
 }
 
-static u32 chandef_downgrade(struct cfg80211_chan_def *c)
-{
-       u32 ret;
-       int tmp;
-
-       switch (c->width) {
-       case NL80211_CHAN_WIDTH_20:
-               c->width = NL80211_CHAN_WIDTH_20_NOHT;
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_40:
-               c->width = NL80211_CHAN_WIDTH_20;
-               c->center_freq1 = c->chan->center_freq;
-               ret = IEEE80211_STA_DISABLE_40MHZ |
-                     IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_80:
-               tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
-               /* n_P40 */
-               tmp /= 2;
-               /* freq_P40 */
-               c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
-               c->width = NL80211_CHAN_WIDTH_40;
-               ret = IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_80P80:
-               c->center_freq2 = 0;
-               c->width = NL80211_CHAN_WIDTH_80;
-               ret = IEEE80211_STA_DISABLE_80P80MHZ |
-                     IEEE80211_STA_DISABLE_160MHZ;
-               break;
-       case NL80211_CHAN_WIDTH_160:
-               /* n_P20 */
-               tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
-               /* n_P80 */
-               tmp /= 4;
-               c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
-               c->width = NL80211_CHAN_WIDTH_80;
-               ret = IEEE80211_STA_DISABLE_80P80MHZ |
-                     IEEE80211_STA_DISABLE_160MHZ;
-               break;
-       default:
-       case NL80211_CHAN_WIDTH_20_NOHT:
-               WARN_ON_ONCE(1);
-               c->width = NL80211_CHAN_WIDTH_20_NOHT;
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               break;
-       }
-
-       WARN_ON_ONCE(!cfg80211_chandef_valid(c));
-
-       return ret;
-}
-
-static u32
-ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
-                            struct ieee80211_supported_band *sband,
-                            struct ieee80211_channel *channel,
-                            const struct ieee80211_ht_operation *ht_oper,
-                            const struct ieee80211_vht_operation *vht_oper,
-                            struct cfg80211_chan_def *chandef)
-{
-       struct cfg80211_chan_def vht_chandef;
-       u32 ht_cfreq, ret;
-
-       chandef->chan = channel;
-       chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
-       chandef->center_freq1 = channel->center_freq;
-       chandef->center_freq2 = 0;
-
-       if (!ht_oper || !sband->ht_cap.ht_supported) {
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       chandef->width = NL80211_CHAN_WIDTH_20;
-
-       ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
-                                                 channel->band);
-       /* check that channel matches the right operating channel */
-       if (channel->center_freq != ht_cfreq) {
-               /*
-                * It's possible that some APs are confused here;
-                * Netgear WNDR3700 sometimes reports 4 higher than
-                * the actual channel in association responses, but
-                * since we look at probe response/beacon data here
-                * it should be OK.
-                */
-               sdata_info(sdata,
-                          "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-                          channel->center_freq, ht_cfreq,
-                          ht_oper->primary_chan, channel->band);
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       /* check 40 MHz support, if we have it */
-       if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
-               switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                       chandef->width = NL80211_CHAN_WIDTH_40;
-                       chandef->center_freq1 += 10;
-                       break;
-               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                       chandef->width = NL80211_CHAN_WIDTH_40;
-                       chandef->center_freq1 -= 10;
-                       break;
-               }
-       } else {
-               /* 40 MHz (and 80 MHz) must be supported for VHT */
-               ret = IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       if (!vht_oper || !sband->vht_cap.vht_supported) {
-               ret = IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       vht_chandef.chan = channel;
-       vht_chandef.center_freq1 =
-               ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
-                                              channel->band);
-       vht_chandef.center_freq2 = 0;
-
-       if (vht_oper->center_freq_seg2_idx)
-               vht_chandef.center_freq2 =
-                       ieee80211_channel_to_frequency(
-                               vht_oper->center_freq_seg2_idx,
-                               channel->band);
-
-       switch (vht_oper->chan_width) {
-       case IEEE80211_VHT_CHANWIDTH_USE_HT:
-               vht_chandef.width = chandef->width;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_80MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_80;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_160MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_160;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
-               break;
-       default:
-               sdata_info(sdata,
-                          "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
-                          vht_oper->chan_width);
-               ret = IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       if (!cfg80211_chandef_valid(&vht_chandef)) {
-               sdata_info(sdata,
-                          "AP VHT information is invalid, disable VHT\n");
-               ret = IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
-               ret = 0;
-               goto out;
-       }
-
-       if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-               sdata_info(sdata,
-                          "AP VHT information doesn't match HT, disable VHT\n");
-               ret = IEEE80211_STA_DISABLE_VHT;
-               goto out;
-       }
-
-       *chandef = vht_chandef;
-
-       ret = 0;
-
-       while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-                                       IEEE80211_CHAN_DISABLED)) {
-               if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
-                       ret = IEEE80211_STA_DISABLE_HT |
-                             IEEE80211_STA_DISABLE_VHT;
-                       goto out;
-               }
-
-               ret = chandef_downgrade(chandef);
-       }
-
-       if (chandef->width != vht_chandef.width)
-               sdata_info(sdata,
-                          "local regulatory prevented using AP HT/VHT configuration, downgraded\n");
-
-out:
-       WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
-       return ret;
-}
-
 static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
                                     struct cfg80211_bss *cbss)
 {
@@ -3521,16 +3686,22 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
            sband->ht_cap.ht_supported) {
-               const u8 *ht_oper_ie;
+               const u8 *ht_oper_ie, *ht_cap;
 
                ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION);
                if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
                        ht_oper = (void *)(ht_oper_ie + 2);
+
+               ht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
+               if (!ht_cap || ht_cap[1] < sizeof(struct ieee80211_ht_cap)) {
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
+                       ht_oper = NULL;
+               }
        }
 
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
            sband->vht_cap.vht_supported) {
-               const u8 *vht_oper_ie;
+               const u8 *vht_oper_ie, *vht_cap;
 
                vht_oper_ie = ieee80211_bss_get_ie(cbss,
                                                   WLAN_EID_VHT_OPERATION);
@@ -3540,15 +3711,21 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                        vht_oper = NULL;
                        sdata_info(sdata,
                                   "AP advertised VHT without HT, disabling both\n");
-                       sdata->flags |= IEEE80211_STA_DISABLE_HT;
-                       sdata->flags |= IEEE80211_STA_DISABLE_VHT;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+               }
+
+               vht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY);
+               if (!vht_cap || vht_cap[1] < sizeof(struct ieee80211_vht_cap)) {
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+                       vht_oper = NULL;
                }
        }
 
        ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
                                                     cbss->channel,
                                                     ht_oper, vht_oper,
-                                                    &chandef);
+                                                    &chandef, true);
 
        sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
                                      local->rx_chains);
@@ -3565,8 +3742,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
         */
        ret = ieee80211_vif_use_channel(sdata, &chandef,
                                        IEEE80211_CHANCTX_SHARED);
-       while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+       while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
                ifmgd->flags |= chandef_downgrade(&chandef);
+               ret = ieee80211_vif_use_channel(sdata, &chandef,
+                                               IEEE80211_CHANCTX_SHARED);
+       }
        return ret;
 }
 
@@ -3595,15 +3775,12 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        return -ENOMEM;
        }
 
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&local->mtx);
-
        if (new_sta) {
                u32 rates = 0, basic_rates = 0;
                bool have_higher_than_11mbit;
                int min_rate = INT_MAX, min_rate_index = -1;
                struct ieee80211_supported_band *sband;
+               const struct cfg80211_bss_ies *ies;
 
                sband = local->hw.wiphy->bands[cbss->channel->band];
 
@@ -3647,8 +3824,34 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 
                /* set timing information */
                sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
-               sdata->vif.bss_conf.sync_tsf = cbss->tsf;
-               sdata->vif.bss_conf.sync_device_ts = bss->device_ts;
+               rcu_read_lock();
+               ies = rcu_dereference(cbss->beacon_ies);
+               if (ies) {
+                       const u8 *tim_ie;
+
+                       sdata->vif.bss_conf.sync_tsf = ies->tsf;
+                       sdata->vif.bss_conf.sync_device_ts =
+                               bss->device_ts_beacon;
+                       tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
+                                                 ies->data, ies->len);
+                       if (tim_ie && tim_ie[1] >= 2)
+                               sdata->vif.bss_conf.sync_dtim_count = tim_ie[2];
+                       else
+                               sdata->vif.bss_conf.sync_dtim_count = 0;
+               } else if (!(local->hw.flags &
+                                       IEEE80211_HW_TIMING_BEACON_ONLY)) {
+                       ies = rcu_dereference(cbss->proberesp_ies);
+                       /* must be non-NULL since beacon IEs were NULL */
+                       sdata->vif.bss_conf.sync_tsf = ies->tsf;
+                       sdata->vif.bss_conf.sync_device_ts =
+                               bss->device_ts_presp;
+                       sdata->vif.bss_conf.sync_dtim_count = 0;
+               } else {
+                       sdata->vif.bss_conf.sync_tsf = 0;
+                       sdata->vif.bss_conf.sync_device_ts = 0;
+                       sdata->vif.bss_conf.sync_dtim_count = 0;
+               }
+               rcu_read_unlock();
 
                /* tell driver about BSSID, basic rates and timing */
                ieee80211_bss_info_change_notify(sdata,
@@ -3768,7 +3971,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        }
 
        /* hold our own reference */
-       cfg80211_ref_bss(auth_data->bss);
+       cfg80211_ref_bss(local->hw.wiphy, auth_data->bss);
        err = 0;
        goto out_unlock;
 
@@ -3791,6 +3994,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_bss *bss = (void *)req->bss->priv;
        struct ieee80211_mgd_assoc_data *assoc_data;
+       const struct cfg80211_bss_ies *beacon_ies;
        struct ieee80211_supported_band *sband;
        const u8 *ssidie, *ht_ie, *vht_ie;
        int i, err;
@@ -3956,40 +4160,48 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        if (err)
                goto err_clear;
 
-       if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
-               const struct cfg80211_bss_ies *beacon_ies;
+       rcu_read_lock();
+       beacon_ies = rcu_dereference(req->bss->beacon_ies);
 
-               rcu_read_lock();
-               beacon_ies = rcu_dereference(req->bss->beacon_ies);
-               if (!beacon_ies) {
-                       /*
-                        * Wait up to one beacon interval ...
-                        * should this be more if we miss one?
-                        */
-                       sdata_info(sdata, "waiting for beacon from %pM\n",
-                                  ifmgd->bssid);
-                       assoc_data->timeout =
-                               TU_TO_EXP_TIME(req->bss->beacon_interval);
-               } else {
-                       const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
-                                                           beacon_ies->data,
-                                                           beacon_ies->len);
-                       if (tim_ie && tim_ie[1] >=
-                                       sizeof(struct ieee80211_tim_ie)) {
-                               const struct ieee80211_tim_ie *tim;
-                               tim = (void *)(tim_ie + 2);
-                               ifmgd->dtim_period = tim->dtim_period;
-                       }
-                       assoc_data->have_beacon = true;
-                       assoc_data->sent_assoc = false;
-                       assoc_data->timeout = jiffies;
+       if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC &&
+           !beacon_ies) {
+               /*
+                * Wait up to one beacon interval ...
+                * should this be more if we miss one?
+                */
+               sdata_info(sdata, "waiting for beacon from %pM\n",
+                          ifmgd->bssid);
+               assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
+               assoc_data->timeout_started = true;
+               assoc_data->need_beacon = true;
+       } else if (beacon_ies) {
+               const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
+                                                   beacon_ies->data,
+                                                   beacon_ies->len);
+               u8 dtim_count = 0;
+
+               if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
+                       const struct ieee80211_tim_ie *tim;
+                       tim = (void *)(tim_ie + 2);
+                       ifmgd->dtim_period = tim->dtim_period;
+                       dtim_count = tim->dtim_count;
                }
-               rcu_read_unlock();
-       } else {
                assoc_data->have_beacon = true;
-               assoc_data->sent_assoc = false;
                assoc_data->timeout = jiffies;
+               assoc_data->timeout_started = true;
+
+               if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+                       sdata->vif.bss_conf.sync_tsf = beacon_ies->tsf;
+                       sdata->vif.bss_conf.sync_device_ts =
+                               bss->device_ts_beacon;
+                       sdata->vif.bss_conf.sync_dtim_count = dtim_count;
+               }
+       } else {
+               assoc_data->timeout = jiffies;
+               assoc_data->timeout_started = true;
        }
+       rcu_read_unlock();
+
        run_again(ifmgd, assoc_data->timeout);
 
        if (bss->corrupt_data) {
@@ -4056,10 +4268,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
        mutex_unlock(&ifmgd->mtx);
 
  out:
-       mutex_lock(&sdata->local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&sdata->local->mtx);
-
        if (sent_frame)
                __cfg80211_send_deauth(sdata->dev, frame_buf,
                                       IEEE80211_DEAUTH_FRAME_LEN);
@@ -4100,10 +4308,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
        __cfg80211_send_disassoc(sdata->dev, frame_buf,
                                 IEEE80211_DEAUTH_FRAME_LEN);
 
-       mutex_lock(&sdata->local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&sdata->local->mtx);
-
        return 0;
 }
 
index 82baf5b..cc79b4a 100644 (file)
@@ -113,6 +113,15 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
         * notify the AP about us leaving the channel and stop all
         * STA interfaces.
         */
+
+       /*
+        * Stop queues and transmit all frames queued by the driver
+        * before sending nullfunc to enable powersave at the AP.
+        */
+       ieee80211_stop_queues_by_reason(&local->hw,
+                                       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
+       drv_flush(local, false);
+
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
@@ -133,12 +142,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
                                sdata, BSS_CHANGED_BEACON_ENABLED);
                }
 
-               if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
-                       netif_tx_stop_all_queues(sdata->dev);
-                       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-                           sdata->u.mgd.associated)
-                               ieee80211_offchannel_ps_enable(sdata);
-               }
+               if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+                   sdata->u.mgd.associated)
+                       ieee80211_offchannel_ps_enable(sdata);
        }
        mutex_unlock(&local->iflist_mtx);
 }
@@ -166,20 +172,6 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
                    sdata->u.mgd.associated)
                        ieee80211_offchannel_ps_disable(sdata);
 
-               if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
-                       /*
-                        * This may wake up queues even though the driver
-                        * currently has them stopped. This is not very
-                        * likely, since the driver won't have gotten any
-                        * (or hardly any) new packets while we weren't
-                        * on the right channel, and even if it happens
-                        * it will at most lead to queueing up one more
-                        * packet per queue in mac80211 rather than on
-                        * the interface qdisc.
-                        */
-                       netif_tx_wake_all_queues(sdata->dev);
-               }
-
                if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
                                       &sdata->state)) {
                        sdata->vif.bss_conf.enable_beacon = true;
@@ -188,6 +180,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
                }
        }
        mutex_unlock(&local->iflist_mtx);
+
+       ieee80211_wake_queues_by_reason(&local->hw,
+                                       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
 }
 
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
index e45b836..d0275f3 100644 (file)
@@ -38,6 +38,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 
        ieee80211_scan_cancel(local);
 
+       ieee80211_dfs_cac_cancel(local);
+
        if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
                mutex_lock(&local->sta_mtx);
                list_for_each_entry(sta, &local->sta_list, list) {
@@ -228,3 +230,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
  * ieee80211_reconfig(), which is also needed for hardware
  * hang/firmware failure/etc. recovery.
  */
+
+void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
+                                   struct cfg80211_wowlan_wakeup *wakeup,
+                                   gfp_t gfp)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp);
+}
+EXPORT_SYMBOL(ieee80211_report_wowlan_wakeup);
index 301386d..d35a5dd 100644 (file)
@@ -68,6 +68,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
        sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
        rcu_read_unlock();
 
+       ieee80211_sta_set_rx_nss(sta);
+
        ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
        set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
 }
index 8c5acdc..eea45a2 100644 (file)
@@ -494,6 +494,33 @@ minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
        kfree(mi);
 }
 
+static void
+minstrel_init_cck_rates(struct minstrel_priv *mp)
+{
+       static const int bitrates[4] = { 10, 20, 55, 110 };
+       struct ieee80211_supported_band *sband;
+       int i, j;
+
+       sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+       if (!sband)
+               return;
+
+       for (i = 0, j = 0; i < sband->n_bitrates; i++) {
+               struct ieee80211_rate *rate = &sband->bitrates[i];
+
+               if (rate->flags & IEEE80211_RATE_ERP_G)
+                       continue;
+
+               for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
+                       if (rate->bitrate != bitrates[j])
+                               continue;
+
+                       mp->cck_rates[j] = i;
+                       break;
+               }
+       }
+}
+
 static void *
 minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
@@ -539,6 +566,8 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
                        S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
 #endif
 
+       minstrel_init_cck_rates(mp);
+
        return mp;
 }
 
index 5d278ec..5ecf757 100644 (file)
@@ -79,6 +79,8 @@ struct minstrel_priv {
        unsigned int lookaround_rate;
        unsigned int lookaround_rate_mrr;
 
+       u8 cck_rates[4];
+
 #ifdef CONFIG_MAC80211_DEBUGFS
        /*
         * enable fixed rate processing per RC
index 9f9c453..3af141c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
        }                                                               \
 }
 
+#define CCK_DURATION(_bitrate, _short, _len)           \
+       (10 /* SIFS */ +                                \
+        (_short ? 72 + 24 : 144 + 48 ) +               \
+        (8 * (_len + 4) * 10) / (_bitrate))
+
+#define CCK_ACK_DURATION(_bitrate, _short)                     \
+       (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) +   \
+        CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
+
+#define CCK_DURATION_LIST(_short)                      \
+       CCK_ACK_DURATION(10, _short),                   \
+       CCK_ACK_DURATION(20, _short),                   \
+       CCK_ACK_DURATION(55, _short),                   \
+       CCK_ACK_DURATION(110, _short)
+
+#define CCK_GROUP                                              \
+       [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = {     \
+               .streams = 0,                                   \
+               .duration = {                                   \
+                       CCK_DURATION_LIST(false),               \
+                       CCK_DURATION_LIST(true)                 \
+               }                                               \
+       }
+
 /*
  * To enable sufficiently targeted rate sampling, MCS rates are divided into
  * groups, based on the number of streams and flags (HT40, SGI) that they
@@ -95,8 +119,13 @@ const struct mcs_group minstrel_mcs_groups[] = {
 #if MINSTREL_MAX_STREAMS >= 3
        MCS_GROUP(3, 1, 1),
 #endif
+
+       /* must be last */
+       CCK_GROUP
 };
 
+#define MINSTREL_CCK_GROUP     (ARRAY_SIZE(minstrel_mcs_groups) - 1)
+
 static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
 
 /*
@@ -119,6 +148,29 @@ minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
                         !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
 }
 
+static struct minstrel_rate_stats *
+minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+                     struct ieee80211_tx_rate *rate)
+{
+       int group, idx;
+
+       if (rate->flags & IEEE80211_TX_RC_MCS) {
+               group = minstrel_ht_get_group_idx(rate);
+               idx = rate->idx % MCS_GROUP_RATES;
+       } else {
+               group = MINSTREL_CCK_GROUP;
+
+               for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
+                       if (rate->idx == mp->cck_rates[idx])
+                               break;
+
+               /* short preamble */
+               if (!(mi->groups[group].supported & BIT(idx)))
+                       idx += 4;
+       }
+       return &mi->groups[group].rates[idx];
+}
+
 static inline struct minstrel_rate_stats *
 minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
 {
@@ -159,7 +211,7 @@ static void
 minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
 {
        struct minstrel_rate_stats *mr;
-       unsigned int usecs;
+       unsigned int usecs = 0;
 
        mr = &mi->groups[group].rates[rate];
 
@@ -168,7 +220,9 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
                return;
        }
 
-       usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
+       if (group != MINSTREL_CCK_GROUP)
+               usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
+
        usecs += minstrel_mcs_groups[group].duration[rate];
        mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
 }
@@ -231,10 +285,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
                        if (!mr->cur_tp)
                                continue;
 
-                       /* ignore the lowest rate of each single-stream group */
-                       if (!i && minstrel_mcs_groups[group].streams == 1)
-                               continue;
-
                        if ((mr->cur_tp > cur_prob_tp && mr->probability >
                             MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
                                mg->max_prob_rate = index;
@@ -297,7 +347,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 }
 
 static bool
-minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
+minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
 {
        if (rate->idx < 0)
                return false;
@@ -305,7 +355,13 @@ minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
        if (!rate->count)
                return false;
 
-       return !!(rate->flags & IEEE80211_TX_RC_MCS);
+       if (rate->flags & IEEE80211_TX_RC_MCS)
+               return true;
+
+       return rate->idx == mp->cck_rates[0] ||
+              rate->idx == mp->cck_rates[1] ||
+              rate->idx == mp->cck_rates[2] ||
+              rate->idx == mp->cck_rates[3];
 }
 
 static void
@@ -390,7 +446,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
        struct minstrel_rate_stats *rate, *rate2;
        struct minstrel_priv *mp = priv;
        bool last;
-       int group;
        int i;
 
        if (!msp->is_ht)
@@ -419,13 +474,12 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
        if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
                mi->sample_packets += info->status.ampdu_len;
 
-       last = !minstrel_ht_txstat_valid(&ar[0]);
+       last = !minstrel_ht_txstat_valid(mp, &ar[0]);
        for (i = 0; !last; i++) {
                last = (i == IEEE80211_TX_MAX_RATES - 1) ||
-                      !minstrel_ht_txstat_valid(&ar[i + 1]);
+                      !minstrel_ht_txstat_valid(mp, &ar[i + 1]);
 
-               group = minstrel_ht_get_group_idx(&ar[i]);
-               rate = &mi->groups[group].rates[ar[i].idx % 8];
+               rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
 
                if (last)
                        rate->success += info->status.ampdu_ack_len;
@@ -451,7 +505,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
 
        if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
                minstrel_ht_update_stats(mp, mi);
-               if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+               if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
+                   mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
                        minstrel_aggr_check(sta, skb);
        }
 }
@@ -467,6 +522,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        unsigned int ctime = 0;
        unsigned int t_slot = 9; /* FIXME */
        unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
+       unsigned int overhead = 0, overhead_rtscts = 0;
 
        mr = minstrel_get_ratestats(mi, index);
        if (mr->probability < MINSTREL_FRAC(1, 10)) {
@@ -488,9 +544,14 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        ctime += (t_slot * cw) >> 1;
        cw = min((cw << 1) | 1, mp->cw_max);
 
+       if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
+               overhead = mi->overhead;
+               overhead_rtscts = mi->overhead_rtscts;
+       }
+
        /* Total TX time for data and Contention after first 2 tries */
-       tx_time = ctime + 2 * (mi->overhead + tx_time_data);
-       tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data);
+       tx_time = ctime + 2 * (overhead + tx_time_data);
+       tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);
 
        /* See how many more tries we can fit inside segment size */
        do {
@@ -499,8 +560,8 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                cw = min((cw << 1) | 1, mp->cw_max);
 
                /* Total TX time after this try */
-               tx_time += ctime + mi->overhead + tx_time_data;
-               tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data;
+               tx_time += ctime + overhead + tx_time_data;
+               tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
 
                if (tx_time_rtscts < mp->segment_size)
                        mr->retry_count_rtscts++;
@@ -530,9 +591,16 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        else
                rate->count = mr->retry_count;
 
-       rate->flags = IEEE80211_TX_RC_MCS | group->flags;
+       rate->flags = 0;
        if (rtscts)
                rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
+
+       if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+               rate->idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
+               return;
+       }
+
+       rate->flags |= IEEE80211_TX_RC_MCS | group->flags;
        rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
 }
 
@@ -596,6 +664,22 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 }
 
 static void
+minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
+                                   struct minstrel_ht_sta *mi, bool val)
+{
+       u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;
+
+       if (!supported || !mi->cck_supported_short)
+               return;
+
+       if (supported & (mi->cck_supported_short << (val * 4)))
+               return;
+
+       supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
+       mi->groups[MINSTREL_CCK_GROUP].supported = supported;
+}
+
+static void
 minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                      struct ieee80211_tx_rate_control *txrc)
 {
@@ -614,6 +698,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
 
        info->flags |= mi->tx_flags;
+       minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
 
        /* Don't use EAPOL frames for sampling on non-mrr hw */
        if (mp->hw->max_rates == 1 &&
@@ -687,6 +772,30 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
 }
 
 static void
+minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+                      struct ieee80211_supported_band *sband,
+                      struct ieee80211_sta *sta)
+{
+       int i;
+
+       if (sband->band != IEEE80211_BAND_2GHZ)
+               return;
+
+       mi->cck_supported = 0;
+       mi->cck_supported_short = 0;
+       for (i = 0; i < 4; i++) {
+               if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
+                       continue;
+
+               mi->cck_supported |= BIT(i);
+               if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
+                       mi->cck_supported_short |= BIT(i);
+       }
+
+       mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
+}
+
+static void
 minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
                         struct ieee80211_sta *sta, void *priv_sta)
 {
@@ -699,14 +808,13 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
        int ack_dur;
        int stbc;
        int i;
-       unsigned int smps;
 
        /* fall back to the old minstrel for legacy stations */
        if (!sta->ht_cap.ht_supported)
                goto use_legacy;
 
        BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
-               MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS);
+               MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1);
 
        msp->is_ht = true;
        memset(mi, 0, sizeof(*mi));
@@ -735,28 +843,29 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
        if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
                mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
 
-       smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
-               IEEE80211_HT_CAP_SM_PS_SHIFT;
-
        for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
-               u16 req = 0;
-
                mi->groups[i].supported = 0;
-               if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
-                       if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                               req |= IEEE80211_HT_CAP_SGI_40;
-                       else
-                               req |= IEEE80211_HT_CAP_SGI_20;
+               if (i == MINSTREL_CCK_GROUP) {
+                       minstrel_ht_update_cck(mp, mi, sband, sta);
+                       continue;
                }
 
-               if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                       req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+               if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
+                       if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+                               if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
+                                       continue;
+                       } else {
+                               if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
+                                       continue;
+                       }
+               }
 
-               if ((sta_cap & req) != req)
+               if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
+                   sta->bandwidth < IEEE80211_STA_RX_BW_40)
                        continue;
 
                /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
-               if (smps == WLAN_HT_CAP_SM_PS_STATIC &&
+               if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
                    minstrel_mcs_groups[i].streams > 1)
                        continue;
 
index 462d2b2..302dbd5 100644 (file)
@@ -107,8 +107,11 @@ struct minstrel_ht_sta {
        /* current MCS group to be sampled */
        u8 sample_group;
 
+       u8 cck_supported;
+       u8 cck_supported_short;
+
        /* MCS rate group info and statistics */
-       struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS];
+       struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1];
 };
 
 struct minstrel_ht_sta_priv {
index e788f76..df44a5a 100644 (file)
 #include "rc80211_minstrel.h"
 #include "rc80211_minstrel_ht.h"
 
+static char *
+minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
+{
+       unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
+       const struct mcs_group *mg;
+       unsigned int j, tp, prob, eprob;
+       char htmode = '2';
+       char gimode = 'L';
+
+       if (!mi->groups[i].supported)
+               return p;
+
+       mg = &minstrel_mcs_groups[i];
+       if (mg->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+               htmode = '4';
+       if (mg->flags & IEEE80211_TX_RC_SHORT_GI)
+               gimode = 'S';
+
+       for (j = 0; j < MCS_GROUP_RATES; j++) {
+               struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
+               static const int bitrates[4] = { 10, 20, 55, 110 };
+               int idx = i * MCS_GROUP_RATES + j;
+
+               if (!(mi->groups[i].supported & BIT(j)))
+                       continue;
+
+               if (i == max_mcs)
+                       p += sprintf(p, "CCK/%cP   ", j < 4 ? 'L' : 'S');
+               else
+                       p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
+
+               *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
+               *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
+               *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
+
+               if (i == max_mcs) {
+                       int r = bitrates[j % 4];
+                       p += sprintf(p, " %2u.%1uM", r / 10, r % 10);
+               } else {
+                       p += sprintf(p, " MCS%-2u", (mg->streams - 1) *
+                                        MCS_GROUP_RATES + j);
+               }
+
+               tp = mr->cur_tp / 10;
+               prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
+               eprob = MINSTREL_TRUNC(mr->probability * 1000);
+
+               p += sprintf(p, "      %6u.%1u   %6u.%1u    %6u.%1u    "
+                               "%3u            %3u(%3u)  %8llu    %8llu\n",
+                               tp / 10, tp % 10,
+                               eprob / 10, eprob % 10,
+                               prob / 10, prob % 10,
+                               mr->retry_count,
+                               mr->last_success,
+                               mr->last_attempts,
+                               (unsigned long long)mr->succ_hist,
+                               (unsigned long long)mr->att_hist);
+       }
+
+       return p;
+}
+
 static int
 minstrel_ht_stats_open(struct inode *inode, struct file *file)
 {
        struct minstrel_ht_sta_priv *msp = inode->i_private;
        struct minstrel_ht_sta *mi = &msp->ht;
        struct minstrel_debugfs_info *ms;
-       unsigned int i, j, tp, prob, eprob;
+       unsigned int i;
+       unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
        char *p;
        int ret;
 
@@ -38,50 +101,13 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
 
        file->private_data = ms;
        p = ms->buf;
-       p += sprintf(p, "type      rate     throughput  ewma prob   this prob  "
-                       "this succ/attempt   success    attempts\n");
-       for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
-               char htmode = '2';
-               char gimode = 'L';
-
-               if (!mi->groups[i].supported)
-                       continue;
-
-               if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                       htmode = '4';
-               if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
-                       gimode = 'S';
+       p += sprintf(p, "type         rate     throughput  ewma prob   this prob  "
+                       "retry   this succ/attempt   success    attempts\n");
 
-               for (j = 0; j < MCS_GROUP_RATES; j++) {
-                       struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
-                       int idx = i * MCS_GROUP_RATES + j;
+       p = minstrel_ht_stats_dump(mi, max_mcs, p);
+       for (i = 0; i < max_mcs; i++)
+               p = minstrel_ht_stats_dump(mi, i, p);
 
-                       if (!(mi->groups[i].supported & BIT(j)))
-                               continue;
-
-                       p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
-
-                       *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
-                       *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
-                       *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
-                       p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
-                                       MCS_GROUP_RATES + j);
-
-                       tp = mr->cur_tp / 10;
-                       prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
-                       eprob = MINSTREL_TRUNC(mr->probability * 1000);
-
-                       p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u        "
-                                       "%3u(%3u)   %8llu    %8llu\n",
-                                       tp / 10, tp % 10,
-                                       eprob / 10, eprob % 10,
-                                       prob / 10, prob % 10,
-                                       mr->last_success,
-                                       mr->last_attempts,
-                                       (unsigned long long)mr->succ_hist,
-                                       (unsigned long long)mr->att_hist);
-               }
-       }
        p += sprintf(p, "\nTotal packet count::    ideal %d      "
                        "lookaround %d\n",
                        max(0, (int) mi->total_packets - (int) mi->sample_packets),
index a190895..3acb70b 100644 (file)
@@ -668,9 +668,9 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
                                            struct tid_ampdu_rx *tid_agg_rx,
-                                           int index)
+                                           int index,
+                                           struct sk_buff_head *frames)
 {
-       struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
        struct ieee80211_rx_status *status;
 
@@ -684,7 +684,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
        tid_agg_rx->reorder_buf[index] = NULL;
        status = IEEE80211_SKB_RXCB(skb);
        status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
-       skb_queue_tail(&local->rx_skb_queue, skb);
+       __skb_queue_tail(frames, skb);
 
 no_frame:
        tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -692,7 +692,8 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
                                             struct tid_ampdu_rx *tid_agg_rx,
-                                            u16 head_seq_num)
+                                            u16 head_seq_num,
+                                            struct sk_buff_head *frames)
 {
        int index;
 
@@ -701,7 +702,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
        while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                        tid_agg_rx->buf_size;
-               ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+               ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+                                               frames);
        }
 }
 
@@ -717,7 +719,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
-                                         struct tid_ampdu_rx *tid_agg_rx)
+                                         struct tid_ampdu_rx *tid_agg_rx,
+                                         struct sk_buff_head *frames)
 {
        int index, j;
 
@@ -746,7 +749,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
                        ht_dbg_ratelimited(sdata,
                                           "release an RX reorder frame due to timeout on earlier frames\n");
-                       ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
+                       ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
+                                                       frames);
 
                        /*
                         * Increment the head seq# also for the skipped slots.
@@ -756,7 +760,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
                        skipped = 0;
                }
        } else while (tid_agg_rx->reorder_buf[index]) {
-               ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+               ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+                                               frames);
                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                        tid_agg_rx->buf_size;
        }
@@ -788,7 +793,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
                                             struct tid_ampdu_rx *tid_agg_rx,
-                                            struct sk_buff *skb)
+                                            struct sk_buff *skb,
+                                            struct sk_buff_head *frames)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -816,7 +822,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
                head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
                /* release stored frames up to new head to stack */
                ieee80211_release_reorder_frames(sdata, tid_agg_rx,
-                                                head_seq_num);
+                                                head_seq_num, frames);
        }
 
        /* Now the new frame is always in the range of the reordering buffer */
@@ -846,7 +852,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
        tid_agg_rx->reorder_buf[index] = skb;
        tid_agg_rx->reorder_time[index] = jiffies;
        tid_agg_rx->stored_mpdu_num++;
-       ieee80211_sta_reorder_release(sdata, tid_agg_rx);
+       ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
 
  out:
        spin_unlock(&tid_agg_rx->reorder_lock);
@@ -857,7 +863,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+                                      struct sk_buff_head *frames)
 {
        struct sk_buff *skb = rx->skb;
        struct ieee80211_local *local = rx->local;
@@ -922,11 +929,12 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
         * sure that we cannot get to it any more before doing
         * anything with it.
         */
-       if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
+       if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
+                                            frames))
                return;
 
  dont_reorder:
-       skb_queue_tail(&local->rx_skb_queue, skb);
+       __skb_queue_tail(frames, skb);
 }
 
 static ieee80211_rx_result debug_noinline
@@ -1452,6 +1460,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                }
        }
 
+       /* mesh power save support */
+       if (ieee80211_vif_is_mesh(&rx->sdata->vif))
+               ieee80211_mps_rx_h_sta_process(sta, hdr);
+
        /*
         * Drop (qos-)data::nullfunc frames silently, since they
         * are used only to control station power saving mode.
@@ -2090,7 +2102,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        if (is_multicast_ether_addr(fwd_hdr->addr1)) {
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
                memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               /* update power mode indication when forwarding */
+               ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
        } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
+               /* mesh power mode flags updated in mesh_nexthop_lookup */
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
        } else {
                /* unable to resolve next hop */
@@ -2177,7 +2192,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 {
        struct sk_buff *skb = rx->skb;
        struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
@@ -2216,7 +2231,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
                spin_lock(&tid_agg_rx->reorder_lock);
                /* release stored frames up to start of BAR */
                ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
-                                                start_seq_num);
+                                                start_seq_num, frames);
                spin_unlock(&tid_agg_rx->reorder_lock);
 
                kfree_skb(skb);
@@ -2360,31 +2375,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                switch (mgmt->u.action.u.ht_smps.action) {
                case WLAN_HT_ACTION_SMPS: {
                        struct ieee80211_supported_band *sband;
-                       u8 smps;
+                       enum ieee80211_smps_mode smps_mode;
 
                        /* convert to HT capability */
                        switch (mgmt->u.action.u.ht_smps.smps_control) {
                        case WLAN_HT_SMPS_CONTROL_DISABLED:
-                               smps = WLAN_HT_CAP_SM_PS_DISABLED;
+                               smps_mode = IEEE80211_SMPS_OFF;
                                break;
                        case WLAN_HT_SMPS_CONTROL_STATIC:
-                               smps = WLAN_HT_CAP_SM_PS_STATIC;
+                               smps_mode = IEEE80211_SMPS_STATIC;
                                break;
                        case WLAN_HT_SMPS_CONTROL_DYNAMIC:
-                               smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+                               smps_mode = IEEE80211_SMPS_DYNAMIC;
                                break;
                        default:
                                goto invalid;
                        }
-                       smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
 
                        /* if no change do nothing */
-                       if ((rx->sta->sta.ht_cap.cap &
-                                       IEEE80211_HT_CAP_SM_PS) == smps)
+                       if (rx->sta->sta.smps_mode == smps_mode)
                                goto handled;
-
-                       rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
-                       rx->sta->sta.ht_cap.cap |= smps;
+                       rx->sta->sta.smps_mode = smps_mode;
 
                        sband = rx->local->hw.wiphy->bands[status->band];
 
@@ -2395,26 +2406,21 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
                        struct ieee80211_supported_band *sband;
                        u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
-                       bool old_40mhz, new_40mhz;
+                       enum ieee80211_sta_rx_bandwidth new_bw;
 
                        /* If it doesn't support 40 MHz it can't change ... */
-                       if (!rx->sta->supports_40mhz)
+                       if (!(rx->sta->sta.ht_cap.cap &
+                                       IEEE80211_HT_CAP_SUP_WIDTH_20_40))
                                goto handled;
 
-                       old_40mhz = rx->sta->sta.ht_cap.cap &
-                                       IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-                       new_40mhz = chanwidth == IEEE80211_HT_CHANWIDTH_ANY;
+                       if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
+                               new_bw = IEEE80211_STA_RX_BW_20;
+                       else
+                               new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
 
-                       if (old_40mhz == new_40mhz)
+                       if (rx->sta->sta.bandwidth == new_bw)
                                goto handled;
 
-                       if (new_40mhz)
-                               rx->sta->sta.ht_cap.cap |=
-                                       IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-                       else
-                               rx->sta->sta.ht_cap.cap &=
-                                       ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-
                        sband = rx->local->hw.wiphy->bands[status->band];
 
                        rate_control_rate_update(local, sband, rx->sta,
@@ -2426,6 +2432,37 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                }
 
                break;
+       case WLAN_CATEGORY_VHT:
+               if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+                   sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+                   sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+                   sdata->vif.type != NL80211_IFTYPE_AP &&
+                   sdata->vif.type != NL80211_IFTYPE_ADHOC)
+                       break;
+
+               /* verify action code is present */
+               if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+                       goto invalid;
+
+               switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
+               case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+                       u8 opmode;
+
+                       /* verify opmode is present */
+                       if (len < IEEE80211_MIN_ACTION_SIZE + 2)
+                               goto invalid;
+
+                       opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+                       ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
+                                                   opmode, status->band,
+                                                   false);
+                       goto handled;
+               }
+               default:
+                       break;
+               }
+               break;
        case WLAN_CATEGORY_BACK:
                if (sdata->vif.type != NL80211_IFTYPE_STATION &&
                    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -2677,8 +2714,9 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
                        return RX_DROP_MONITOR;
                break;
        case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
-               /* process only for ibss */
-               if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+               /* process only for ibss and mesh */
+               if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+                   sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
                        return RX_DROP_MONITOR;
                break;
        default:
@@ -2801,7 +2839,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
        }
 }
 
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+                                 struct sk_buff_head *frames)
 {
        ieee80211_rx_result res = RX_DROP_MONITOR;
        struct sk_buff *skb;
@@ -2813,15 +2852,9 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
                        goto rxh_next;  \
        } while (0);
 
-       spin_lock(&rx->local->rx_skb_queue.lock);
-       if (rx->local->running_rx_handler)
-               goto unlock;
-
-       rx->local->running_rx_handler = true;
-
-       while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
-               spin_unlock(&rx->local->rx_skb_queue.lock);
+       spin_lock_bh(&rx->local->rx_path_lock);
 
+       while ((skb = __skb_dequeue(frames))) {
                /*
                 * all the other fields are valid across frames
                 * that belong to an aMPDU since they are on the
@@ -2842,7 +2875,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 #endif
                CALL_RXH(ieee80211_rx_h_amsdu)
                CALL_RXH(ieee80211_rx_h_data)
-               CALL_RXH(ieee80211_rx_h_ctrl);
+
+               /* special treatment -- needs the queue */
+               res = ieee80211_rx_h_ctrl(rx, frames);
+               if (res != RX_CONTINUE)
+                       goto rxh_next;
+
                CALL_RXH(ieee80211_rx_h_mgmt_check)
                CALL_RXH(ieee80211_rx_h_action)
                CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2851,20 +2889,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 
  rxh_next:
                ieee80211_rx_handlers_result(rx, res);
-               spin_lock(&rx->local->rx_skb_queue.lock);
+
 #undef CALL_RXH
        }
 
-       rx->local->running_rx_handler = false;
-
- unlock:
-       spin_unlock(&rx->local->rx_skb_queue.lock);
+       spin_unlock_bh(&rx->local->rx_path_lock);
 }
 
 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 {
+       struct sk_buff_head reorder_release;
        ieee80211_rx_result res = RX_DROP_MONITOR;
 
+       __skb_queue_head_init(&reorder_release);
+
 #define CALL_RXH(rxh)                  \
        do {                            \
                res = rxh(rx);          \
@@ -2874,9 +2912,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 
        CALL_RXH(ieee80211_rx_h_check)
 
-       ieee80211_rx_reorder_ampdu(rx);
+       ieee80211_rx_reorder_ampdu(rx, &reorder_release);
 
-       ieee80211_rx_handlers(rx);
+       ieee80211_rx_handlers(rx, &reorder_release);
        return;
 
  rxh_next:
@@ -2891,6 +2929,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
  */
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
+       struct sk_buff_head frames;
        struct ieee80211_rx_data rx = {
                .sta = sta,
                .sdata = sta->sdata,
@@ -2906,11 +2945,13 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
        if (!tid_agg_rx)
                return;
 
+       __skb_queue_head_init(&frames);
+
        spin_lock(&tid_agg_rx->reorder_lock);
-       ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
+       ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
        spin_unlock(&tid_agg_rx->reorder_lock);
 
-       ieee80211_rx_handlers(&rx);
+       ieee80211_rx_handlers(&rx, &frames);
 }
 
 /* main receive path */
index 607684c..43a45cf 100644 (file)
 
 #define IEEE80211_PROBE_DELAY (HZ / 33)
 #define IEEE80211_CHANNEL_TIME (HZ / 33)
-#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
-
-static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
-{
-       struct ieee80211_bss *bss = (void *)cbss->priv;
-
-       kfree(bss_mesh_id(bss));
-       kfree(bss_mesh_cfg(bss));
-}
+#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9)
 
 void ieee80211_rx_bss_put(struct ieee80211_local *local,
                          struct ieee80211_bss *bss)
 {
        if (!bss)
                return;
-       cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
+       cfg80211_put_bss(local->hw.wiphy,
+                        container_of((void *)bss, struct cfg80211_bss, priv));
 }
 
 static bool is_uapsd_supported(struct ieee802_11_elems *elems)
@@ -85,10 +78,12 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
        if (!cbss)
                return NULL;
 
-       cbss->free_priv = ieee80211_rx_bss_free;
        bss = (void *)cbss->priv;
 
-       bss->device_ts = rx_status->device_timestamp;
+       if (beacon)
+               bss->device_ts_beacon = rx_status->device_timestamp;
+       else
+               bss->device_ts_presp = rx_status->device_timestamp;
 
        if (elems->parse_error) {
                if (beacon)
@@ -146,9 +141,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
                        bss->valid_data |= IEEE80211_BSS_VALID_WMM;
        }
 
-       if (!beacon)
-               bss->last_probe_resp = jiffies;
-
        return bss;
 }
 
@@ -342,6 +334,9 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
 
        ieee80211_offchannel_stop_vifs(local);
 
+       /* ensure nullfunc is transmitted before leaving operating channel */
+       drv_flush(local, false);
+
        ieee80211_configure_filter(local);
 
        /* We need to set power level at maximum rate for scanning. */
@@ -356,6 +351,9 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
 static bool ieee80211_can_scan(struct ieee80211_local *local,
                               struct ieee80211_sub_if_data *sdata)
 {
+       if (local->radar_detect_enabled)
+               return false;
+
        if (!list_empty(&local->roc_list))
                return false;
 
@@ -390,6 +388,11 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        int i;
        struct ieee80211_sub_if_data *sdata;
        enum ieee80211_band band = local->hw.conf.channel->band;
+       u32 tx_flags;
+
+       tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
+       if (local->scan_req->no_cck)
+               tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
 
        sdata = rcu_dereference_protected(local->scan_sdata,
                                          lockdep_is_held(&local->mtx));
@@ -401,8 +404,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
                        local->scan_req->ssids[i].ssid_len,
                        local->scan_req->ie, local->scan_req->ie_len,
                        local->scan_req->rates[band], false,
-                       local->scan_req->no_cck,
-                       local->hw.conf.channel, true);
+                       tx_flags, local->hw.conf.channel, true);
 
        /*
         * After sending probe requests, wait for probe responses
@@ -546,8 +548,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
        bool associated = false;
        bool tx_empty = true;
        bool bad_latency;
-       bool listen_int_exceeded;
-       unsigned long min_beacon_int = 0;
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_channel *next_chan;
        enum mac80211_scan_state next_scan_state;
@@ -566,11 +566,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
                        if (sdata->u.mgd.associated) {
                                associated = true;
 
-                               if (sdata->vif.bss_conf.beacon_int <
-                                   min_beacon_int || min_beacon_int == 0)
-                                       min_beacon_int =
-                                               sdata->vif.bss_conf.beacon_int;
-
                                if (!qdisc_all_tx_empty(sdata->dev)) {
                                        tx_empty = false;
                                        break;
@@ -587,34 +582,19 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
         * see if we can scan another channel without interfering
         * with the current traffic situation.
         *
-        * Since we don't know if the AP has pending frames for us
-        * we can only check for our tx queues and use the current
-        * pm_qos requirements for rx. Hence, if no tx traffic occurs
-        * at all we will scan as many channels in a row as the pm_qos
-        * latency allows us to. Additionally we also check for the
-        * currently negotiated listen interval to prevent losing
-        * frames unnecessarily.
-        *
-        * Otherwise switch back to the operating channel.
+        * Keep good latency, do not stay off-channel more than 125 ms.
         */
 
        bad_latency = time_after(jiffies +
-                       ieee80211_scan_get_channel_time(next_chan),
-                       local->leave_oper_channel_time +
-                       usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
-
-       listen_int_exceeded = time_after(jiffies +
-                       ieee80211_scan_get_channel_time(next_chan),
-                       local->leave_oper_channel_time +
-                       usecs_to_jiffies(min_beacon_int * 1024) *
-                       local->hw.conf.listen_interval);
+                                ieee80211_scan_get_channel_time(next_chan),
+                                local->leave_oper_channel_time + HZ / 8);
 
        if (associated && !tx_empty) {
                if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
                        next_scan_state = SCAN_ABORT;
                else
                        next_scan_state = SCAN_SUSPEND;
-       } else if (associated && (bad_latency || listen_int_exceeded)) {
+       } else if (associated && bad_latency) {
                next_scan_state = SCAN_SUSPEND;
        } else {
                next_scan_state = SCAN_SET_CHANNEL;
index 9d864ed..a79ce82 100644 (file)
@@ -120,6 +120,8 @@ static void cleanup_single_sta(struct sta_info *sta)
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
                    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                        ps = &sdata->bss->ps;
+               else if (ieee80211_vif_is_mesh(&sdata->vif))
+                       ps = &sdata->u.mesh.ps;
                else
                        return;
 
@@ -135,13 +137,8 @@ static void cleanup_single_sta(struct sta_info *sta)
                ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
        }
 
-#ifdef CONFIG_MAC80211_MESH
-       if (ieee80211_vif_is_mesh(&sdata->vif)) {
-               mesh_accept_plinks_update(sdata);
-               mesh_plink_deactivate(sta);
-               del_timer_sync(&sta->plink_timer);
-       }
-#endif
+       if (ieee80211_vif_is_mesh(&sdata->vif))
+               mesh_sta_cleanup(sta);
 
        cancel_work_sync(&sta->drv_unblock_wk);
 
@@ -378,12 +375,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_NUM_TIDS; i++)
                sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 
-       sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
+       sta->sta.smps_mode = IEEE80211_SMPS_OFF;
 
-#ifdef CONFIG_MAC80211_MESH
-       sta->plink_state = NL80211_PLINK_LISTEN;
-       init_timer(&sta->plink_timer);
-#endif
+       sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
 
        return sta;
 }
@@ -579,7 +573,6 @@ void sta_info_recalc_tim(struct sta_info *sta)
 {
        struct ieee80211_local *local = sta->local;
        struct ps_data *ps;
-       unsigned long flags;
        bool indicate_tim = false;
        u8 ignore_for_tim = sta->sta.uapsd_queues;
        int ac;
@@ -592,6 +585,12 @@ void sta_info_recalc_tim(struct sta_info *sta)
 
                ps = &sta->sdata->bss->ps;
                id = sta->sta.aid;
+#ifdef CONFIG_MAC80211_MESH
+       } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
+               ps = &sta->sdata->u.mesh.ps;
+               /* TIM map only for PLID <= IEEE80211_MAX_AID */
+               id = le16_to_cpu(sta->plid) % IEEE80211_MAX_AID;
+#endif
        } else {
                return;
        }
@@ -630,7 +629,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
        }
 
  done:
-       spin_lock_irqsave(&local->tim_lock, flags);
+       spin_lock_bh(&local->tim_lock);
 
        if (indicate_tim)
                __bss_tim_set(ps->tim, id);
@@ -643,7 +642,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
                local->tim_in_locked_section = false;
        }
 
-       spin_unlock_irqrestore(&local->tim_lock, flags);
+       spin_unlock_bh(&local->tim_lock);
 }
 
 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
@@ -750,8 +749,9 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
        bool have_buffered = false;
        int ac;
 
-       /* This is only necessary for stations on BSS interfaces */
-       if (!sta->sdata->bss)
+       /* This is only necessary for stations on BSS/MBSS interfaces */
+       if (!sta->sdata->bss &&
+           !ieee80211_vif_is_mesh(&sta->sdata->vif))
                return false;
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -939,6 +939,11 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
                if (time_after(jiffies, sta->last_rx + exp_time)) {
                        sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
                                sta->sta.addr);
+
+                       if (ieee80211_vif_is_mesh(&sdata->vif) &&
+                           test_sta_flag(sta, WLAN_STA_PS_STA))
+                               atomic_dec(&sdata->u.mesh.ps.num_sta_ps);
+
                        WARN_ON(__sta_info_destroy(sta));
                }
        }
@@ -997,6 +1002,8 @@ static void clear_sta_ps_flags(void *_sta)
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
            sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                ps = &sdata->bss->ps;
+       else if (ieee80211_vif_is_mesh(&sdata->vif))
+               ps = &sdata->u.mesh.ps;
        else
                return;
 
@@ -1114,6 +1121,8 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
 
        drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
 
+       skb->dev = sdata->dev;
+
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        if (WARN_ON(!chanctx_conf)) {
index af7d78a..63dfdb5 100644 (file)
@@ -56,6 +56,8 @@
  * @WLAN_STA_INSERTED: This station is inserted into the hash table.
  * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
  * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
+ * @WLAN_STA_MPSP_OWNER: local STA is owner of a mesh Peer Service Period.
+ * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP.
  */
 enum ieee80211_sta_info_flags {
        WLAN_STA_AUTH,
@@ -78,6 +80,8 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_INSERTED,
        WLAN_STA_RATE_CONTROL,
        WLAN_STA_TOFFSET_KNOWN,
+       WLAN_STA_MPSP_OWNER,
+       WLAN_STA_MPSP_RECIPIENT,
 };
 
 #define ADDBA_RESP_INTERVAL HZ
@@ -282,6 +286,9 @@ struct sta_ampdu_mlme {
  * @t_offset_setpoint: reference timing offset of this sta to be used when
  *     calculating clockdrift
  * @ch_width: peer's channel width
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -289,8 +296,9 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
- * @supports_40mhz: tracks whether the station advertised 40 MHz support
- *     as we overwrite its HT parameters with the currently used value
+ * @rcu_head: RCU head used for freeing this station struct
+ * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
+ *     taken from HT/VHT capabilities or VHT operating mode notification
  */
 struct sta_info {
        /* General information, mostly static */
@@ -379,6 +387,10 @@ struct sta_info {
        s64 t_offset;
        s64 t_offset_setpoint;
        enum nl80211_chan_width ch_width;
+       /* mesh power save */
+       enum nl80211_mesh_power_mode local_pm;
+       enum nl80211_mesh_power_mode peer_pm;
+       enum nl80211_mesh_power_mode nonpeer_pm;
 #endif
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -388,11 +400,11 @@ struct sta_info {
        } debugfs;
 #endif
 
+       enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+
        unsigned int lost_packets;
        unsigned int beacon_loss_count;
 
-       bool supports_40mhz;
-
        /* keep last! */
        struct ieee80211_sta sta;
 };
index 07d9957..4343920 100644 (file)
@@ -335,7 +335,8 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
        if (dropped)
                acked = false;
 
-       if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+       if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+                          IEEE80211_TX_INTFL_MLME_CONN_TX)) {
                struct ieee80211_sub_if_data *sdata = NULL;
                struct ieee80211_sub_if_data *iter_sdata;
                u64 cookie = (unsigned long)skb;
@@ -357,10 +358,13 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
                        sdata = rcu_dereference(local->p2p_sdata);
                }
 
-               if (!sdata)
+               if (!sdata) {
                        skb->dev = NULL;
-               else if (ieee80211_is_nullfunc(hdr->frame_control) ||
-                        ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+               } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+                       ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control,
+                                                    acked);
+               } else if (ieee80211_is_nullfunc(hdr->frame_control) ||
+                          ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                        cfg80211_probe_status(sdata->dev, hdr->addr1,
                                              cookie, acked, GFP_ATOMIC);
                } else {
@@ -468,6 +472,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                        return;
                }
 
+               /* mesh Peer Service Period support */
+               if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
+                   ieee80211_is_data_qos(fc))
+                       ieee80211_mpsp_trigger_process(
+                                       ieee80211_get_qos_ctl(hdr),
+                                       sta, true, acked);
+
                if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
                    (rates_idx != -1))
                        sta->last_tx_rate = info->status.rates[rates_idx];
@@ -502,11 +513,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                                       IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
                                      IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
 
-                               if (local->hw.flags &
-                                   IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
-                                       ieee80211_stop_tx_ba_session(&sta->sta, tid);
-                               else
-                                       ieee80211_set_bar_pending(sta, tid, ssn);
+                               ieee80211_set_bar_pending(sta, tid, ssn);
                        }
                }
 
index 57e14d5..3ed801d 100644 (file)
@@ -177,12 +177,11 @@ void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
        struct ieee80211_key *key = (struct ieee80211_key *)
                        container_of(keyconf, struct ieee80211_key, conf);
        struct tkip_ctx *ctx = &key->u.tkip.tx;
-       unsigned long flags;
 
-       spin_lock_irqsave(&key->u.tkip.txlock, flags);
+       spin_lock_bh(&key->u.tkip.txlock);
        ieee80211_compute_tkip_p1k(key, iv32);
        memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
-       spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
+       spin_unlock_bh(&key->u.tkip.txlock);
 }
 EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
 
@@ -208,12 +207,11 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
        const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
        u32 iv32 = get_unaligned_le32(&data[4]);
        u16 iv16 = data[2] | (data[0] << 8);
-       unsigned long flags;
 
-       spin_lock_irqsave(&key->u.tkip.txlock, flags);
+       spin_lock_bh(&key->u.tkip.txlock);
        ieee80211_compute_tkip_p1k(key, iv32);
        tkip_mixing_phase2(tk, ctx, iv16, p2k);
-       spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
+       spin_unlock_bh(&key->u.tkip.txlock);
 }
 EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
 
index 41861b9..1183c4a 100644 (file)
@@ -36,7 +36,7 @@
                        __entry->control_freq = (c)->chan->center_freq;         \
                        __entry->chan_width = (c)->width;                       \
                        __entry->center_freq1 = (c)->center_freq1;              \
-                       __entry->center_freq1 = (c)->center_freq2;
+                       __entry->center_freq2 = (c)->center_freq2;
 #define CHANDEF_PR_FMT " control:%d MHz width:%d center: %d/%d MHz"
 #define CHANDEF_PR_ARG __entry->control_freq, __entry->chan_width,             \
                        __entry->center_freq1, __entry->center_freq2
@@ -340,6 +340,7 @@ TRACE_EVENT(drv_bss_info_changed,
                __field(u16, assoc_cap)
                __field(u64, sync_tsf)
                __field(u32, sync_device_ts)
+               __field(u8, sync_dtim_count)
                __field(u32, basic_rates)
                __array(int, mcast_rate, IEEE80211_NUM_BANDS)
                __field(u16, ht_operation_mode)
@@ -347,8 +348,11 @@ TRACE_EVENT(drv_bss_info_changed,
                __field(s32, cqm_rssi_hyst);
                __field(u32, channel_width);
                __field(u32, channel_cfreq1);
-               __dynamic_array(u32, arp_addr_list, info->arp_addr_cnt);
-               __field(bool, arp_filter_enabled);
+               __dynamic_array(u32, arp_addr_list,
+                               info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
+                                       IEEE80211_BSS_ARP_ADDR_LIST_LEN :
+                                       info->arp_addr_cnt);
+               __field(int, arp_addr_cnt);
                __field(bool, qos);
                __field(bool, idle);
                __field(bool, ps);
@@ -376,6 +380,7 @@ TRACE_EVENT(drv_bss_info_changed,
                __entry->assoc_cap = info->assoc_capability;
                __entry->sync_tsf = info->sync_tsf;
                __entry->sync_device_ts = info->sync_device_ts;
+               __entry->sync_dtim_count = info->sync_dtim_count;
                __entry->basic_rates = info->basic_rates;
                memcpy(__entry->mcast_rate, info->mcast_rate,
                       sizeof(__entry->mcast_rate));
@@ -384,9 +389,11 @@ TRACE_EVENT(drv_bss_info_changed,
                __entry->cqm_rssi_hyst = info->cqm_rssi_hyst;
                __entry->channel_width = info->chandef.width;
                __entry->channel_cfreq1 = info->chandef.center_freq1;
+               __entry->arp_addr_cnt = info->arp_addr_cnt;
                memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list,
-                      sizeof(u32) * info->arp_addr_cnt);
-               __entry->arp_filter_enabled = info->arp_filter_enabled;
+                      sizeof(u32) * (info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
+                                       IEEE80211_BSS_ARP_ADDR_LIST_LEN :
+                                       info->arp_addr_cnt));
                __entry->qos = info->qos;
                __entry->idle = info->idle;
                __entry->ps = info->ps;
@@ -1184,23 +1191,26 @@ TRACE_EVENT(drv_set_rekey_data,
 
 TRACE_EVENT(drv_rssi_callback,
        TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata,
                 enum ieee80211_rssi_event rssi_event),
 
-       TP_ARGS(local, rssi_event),
+       TP_ARGS(local, sdata, rssi_event),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
+               VIF_ENTRY
                __field(u32, rssi_event)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
+               VIF_ASSIGN;
                __entry->rssi_event = rssi_event;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT " rssi_event:%d",
-               LOCAL_PR_ARG, __entry->rssi_event
+               LOCAL_PR_FMT VIF_PR_FMT " rssi_event:%d",
+               LOCAL_PR_ARG, VIF_PR_ARG, __entry->rssi_event
        )
 );
 
@@ -1432,6 +1442,14 @@ DEFINE_EVENT(local_only_evt, drv_restart_complete,
        TP_ARGS(local)
 );
 
+#if IS_ENABLED(CONFIG_IPV6)
+DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata),
+       TP_ARGS(local, sdata)
+);
+#endif
+
 /*
  * Tracing for API calls that drivers call.
  */
@@ -1821,6 +1839,48 @@ TRACE_EVENT(stop_queue,
        )
 );
 
+TRACE_EVENT(drv_set_default_unicast_key,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata,
+                int key_idx),
+
+       TP_ARGS(local, sdata, key_idx),
+
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+               VIF_ENTRY
+               __field(int, key_idx)
+       ),
+
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+               VIF_ASSIGN;
+               __entry->key_idx = key_idx;
+       ),
+
+       TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d",
+                 LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx)
+);
+
+TRACE_EVENT(api_radar_detected,
+       TP_PROTO(struct ieee80211_local *local),
+
+       TP_ARGS(local),
+
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+       ),
+
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+       ),
+
+       TP_printk(
+               LOCAL_PR_FMT " radar detected",
+               LOCAL_PR_ARG
+       )
+);
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
index f32d681..fe644f9 100644 (file)
@@ -329,6 +329,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
 
                if (sdata->vif.type == NL80211_IFTYPE_AP)
                        ps = &sdata->u.ap.ps;
+               else if (ieee80211_vif_is_mesh(&sdata->vif))
+                       ps = &sdata->u.mesh.ps;
                else
                        continue;
 
@@ -372,18 +374,20 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        /*
         * broadcast/multicast frame
         *
-        * If any of the associated stations is in power save mode,
+        * If any of the associated/peer stations is in power save mode,
         * the frame is buffered to be sent after DTIM beacon frame.
         * This is done either by the hardware or us.
         */
 
-       /* powersaving STAs currently only in AP/VLAN mode */
+       /* powersaving STAs currently only in AP/VLAN/mesh mode */
        if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
            tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
                if (!tx->sdata->bss)
                        return TX_CONTINUE;
 
                ps = &tx->sdata->bss->ps;
+       } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
+               ps = &tx->sdata->u.mesh.ps;
        } else {
                return TX_CONTINUE;
        }
@@ -594,7 +598,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                        break;
                }
 
-               if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED))
+               if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
+                            !ieee80211_is_deauth(hdr->frame_control)))
                        return TX_DROP;
 
                if (!skip_hw && tx->key &&
@@ -1225,6 +1230,21 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
                if (local->queue_stop_reasons[q] ||
                    (!txpending && !skb_queue_empty(&local->pending[q]))) {
+                       if (unlikely(info->flags &
+                                       IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
+                                    local->queue_stop_reasons[q] &
+                                       ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
+                               /*
+                                * Drop off-channel frames if queues are stopped
+                                * for any reason other than off-channel
+                                * operation. Never queue them.
+                                */
+                               spin_unlock_irqrestore(
+                                       &local->queue_stop_reason_lock, flags);
+                               ieee80211_purge_tx_queue(&local->hw, skbs);
+                               return true;
+                       }
+
                        /*
                         * Since queue is stopped, queue up frames for later
                         * transmission from the tx-pending tasklet when the
@@ -1472,12 +1492,14 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
        hdr = (struct ieee80211_hdr *) skb->data;
        info->control.vif = &sdata->vif;
 
-       if (ieee80211_vif_is_mesh(&sdata->vif) &&
-           ieee80211_is_data(hdr->frame_control) &&
-           !is_multicast_ether_addr(hdr->addr1) &&
-           mesh_nexthop_resolve(skb, sdata)) {
-               /* skb queued: don't free */
-               return;
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               if (ieee80211_is_data(hdr->frame_control) &&
+                   is_unicast_ether_addr(hdr->addr1)) {
+                       if (mesh_nexthop_resolve(skb, sdata))
+                               return; /* skb queued: don't free */
+               } else {
+                       ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
+               }
        }
 
        ieee80211_set_qos_hdr(sdata, skb);
@@ -1787,16 +1809,16 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        break;
                /* fall through */
        case NL80211_IFTYPE_AP:
+               if (sdata->vif.type == NL80211_IFTYPE_AP)
+                       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+               if (!chanctx_conf)
+                       goto fail_rcu;
                fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
                /* DA BSSID SA */
                memcpy(hdr.addr1, skb->data, ETH_ALEN);
                memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
                memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
                hdrlen = 24;
-               if (sdata->vif.type == NL80211_IFTYPE_AP)
-                       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-               if (!chanctx_conf)
-                       goto fail_rcu;
                band = chanctx_conf->def.chan->band;
                break;
        case NL80211_IFTYPE_WDS:
@@ -2342,11 +2364,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
        if (local->tim_in_locked_section) {
                __ieee80211_beacon_add_tim(sdata, ps, skb);
        } else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&local->tim_lock, flags);
+               spin_lock(&local->tim_lock);
                __ieee80211_beacon_add_tim(sdata, ps, skb);
-               spin_unlock_irqrestore(&local->tim_lock, flags);
+               spin_unlock(&local->tim_lock);
        }
 
        return 0;
@@ -2424,66 +2444,26 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                 IEEE80211_STYPE_BEACON);
        } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
-               struct ieee80211_mgmt *mgmt;
                struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-               u8 *pos;
-               int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
-                             sizeof(mgmt->u.beacon);
+               struct beacon_data *bcn = rcu_dereference(ifmsh->beacon);
 
-#ifdef CONFIG_MAC80211_MESH
-               if (!sdata->u.mesh.mesh_id_len)
+               if (!bcn)
                        goto out;
-#endif
 
                if (ifmsh->sync_ops)
                        ifmsh->sync_ops->adjust_tbtt(
                                                sdata);
 
                skb = dev_alloc_skb(local->tx_headroom +
-                                   hdr_len +
-                                   2 + /* NULL SSID */
-                                   2 + 8 + /* supported rates */
-                                   2 + 3 + /* DS params */
-                                   2 + (IEEE80211_MAX_SUPP_RATES - 8) +
-                                   2 + sizeof(struct ieee80211_ht_cap) +
-                                   2 + sizeof(struct ieee80211_ht_operation) +
-                                   2 + sdata->u.mesh.mesh_id_len +
-                                   2 + sizeof(struct ieee80211_meshconf_ie) +
-                                   sdata->u.mesh.ie_len);
+                                   bcn->head_len +
+                                   256 + /* TIM IE */
+                                   bcn->tail_len);
                if (!skb)
                        goto out;
-
-               skb_reserve(skb, local->hw.extra_tx_headroom);
-               mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
-               memset(mgmt, 0, hdr_len);
-               mgmt->frame_control =
-                   cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-               eth_broadcast_addr(mgmt->da);
-               memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
-               memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
-               mgmt->u.beacon.beacon_int =
-                       cpu_to_le16(sdata->vif.bss_conf.beacon_int);
-               mgmt->u.beacon.capab_info |= cpu_to_le16(
-                       sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
-
-               pos = skb_put(skb, 2);
-               *pos++ = WLAN_EID_SSID;
-               *pos++ = 0x0;
-
-               band = chanctx_conf->def.chan->band;
-
-               if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
-                   mesh_add_ds_params_ie(skb, sdata) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
-                   mesh_add_rsn_ie(skb, sdata) ||
-                   mesh_add_ht_cap_ie(skb, sdata) ||
-                   mesh_add_ht_oper_ie(skb, sdata) ||
-                   mesh_add_meshid_ie(skb, sdata) ||
-                   mesh_add_meshconf_ie(skb, sdata) ||
-                   mesh_add_vendor_ies(skb, sdata)) {
-                       pr_err("o11s: couldn't add ies!\n");
-                       goto out;
-               }
+               skb_reserve(skb, local->tx_headroom);
+               memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
+               ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb);
+               memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
        } else {
                WARN_ON(1);
                goto out;
@@ -2733,6 +2713,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
                        goto out;
 
                ps = &sdata->u.ap.ps;
+       } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               ps = &sdata->u.mesh.ps;
        } else {
                goto out;
        }
@@ -2756,6 +2738,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
                                cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                }
 
+               sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
                if (!ieee80211_tx_prepare(sdata, &tx, skb))
                        break;
                dev_kfree_skb_any(skb);
@@ -2788,6 +2771,8 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
        skb_set_queue_mapping(skb, ac);
        skb->priority = tid;
 
+       skb->dev = sdata->dev;
+
        /*
         * The other path calling ieee80211_xmit is from the tasklet,
         * and while we can handle concurrent transmissions locking
index 7519018..0f38f43 100644 (file)
@@ -739,11 +739,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                                if (calc_crc)
                                        crc = crc32_be(crc, pos - 2, elen + 2);
 
-                               if (pos[3] == 1) {
-                                       /* OUI Type 1 - WPA IE */
-                                       elems->wpa = pos;
-                                       elems->wpa_len = elen;
-                               } else if (elen >= 5 && pos[3] == 2) {
+                               if (elen >= 5 && pos[3] == 2) {
                                        /* OUI Type 2 - WMM IE */
                                        if (pos[4] == 0) {
                                                elems->wmm_info = pos;
@@ -791,6 +787,12 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                        else
                                elem_parse_failed = true;
                        break;
+               case WLAN_EID_OPMODE_NOTIF:
+                       if (elen > 0)
+                               elems->opmode_notif = pos;
+                       else
+                               elem_parse_failed = true;
+                       break;
                case WLAN_EID_MESH_ID:
                        elems->mesh_id = pos;
                        elems->mesh_id_len = elen;
@@ -805,6 +807,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                        elems->peering = pos;
                        elems->peering_len = elen;
                        break;
+               case WLAN_EID_MESH_AWAKE_WINDOW:
+                       if (elen >= 2)
+                               elems->awake_window = (void *)pos;
+                       break;
                case WLAN_EID_PREQ:
                        elems->preq = pos;
                        elems->preq_len = elen;
@@ -1029,8 +1035,9 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
-                        u8 *extra, size_t extra_len, const u8 *da,
-                        const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
+                        const u8 *extra, size_t extra_len, const u8 *da,
+                        const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx,
+                        u32 tx_flags)
 {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
@@ -1063,7 +1070,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                WARN_ON(err);
        }
 
-       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+                                       tx_flags;
        ieee80211_tx_skb(sdata, skb);
 }
 
@@ -1277,7 +1285,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck,
+                             u32 ratemask, bool directed, u32 tx_flags,
                              struct ieee80211_channel *channel, bool scan)
 {
        struct sk_buff *skb;
@@ -1286,9 +1294,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                                        ssid, ssid_len,
                                        ie, ie_len, directed);
        if (skb) {
-               if (no_cck)
-                       IEEE80211_SKB_CB(skb)->flags |=
-                               IEEE80211_TX_CTL_NO_CCK_RATE;
+               IEEE80211_SKB_CB(skb)->flags |= tx_flags;
                if (scan)
                        ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
                else
@@ -1538,6 +1544,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        changed |= BSS_CHANGED_ASSOC |
                                   BSS_CHANGED_ARP_FILTER |
                                   BSS_CHANGED_PS;
+
+                       if (sdata->u.mgd.dtim_period)
+                               changed |= BSS_CHANGED_DTIM_PERIOD;
+
                        mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
                        mutex_unlock(&sdata->u.mgd.mtx);
@@ -1937,7 +1947,7 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 }
 
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                 struct ieee80211_ht_operation *ht_oper,
+                                 const struct ieee80211_ht_operation *ht_oper,
                                  struct cfg80211_chan_def *chandef)
 {
        enum nl80211_channel_type channel_type;
@@ -2125,3 +2135,49 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
 
        return ts;
 }
+
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       mutex_lock(&local->iflist_mtx);
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+
+               if (sdata->wdev.cac_started) {
+                       ieee80211_vif_release_channel(sdata);
+                       cfg80211_cac_event(sdata->dev,
+                                          NL80211_RADAR_CAC_ABORTED,
+                                          GFP_KERNEL);
+               }
+       }
+       mutex_unlock(&local->iflist_mtx);
+}
+
+void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+{
+       struct ieee80211_local *local =
+               container_of(work, struct ieee80211_local, radar_detected_work);
+       struct cfg80211_chan_def chandef;
+
+       ieee80211_dfs_cac_cancel(local);
+
+       if (local->use_chanctx)
+               /* currently not handled */
+               WARN_ON(1);
+       else {
+               cfg80211_chandef_create(&chandef, local->hw.conf.channel,
+                                       local->hw.conf.channel_type);
+               cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
+       }
+}
+
+void ieee80211_radar_detected(struct ieee80211_hw *hw)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+
+       trace_api_radar_detected(local);
+
+       ieee80211_queue_work(hw, &local->radar_detected_work);
+}
+EXPORT_SYMBOL(ieee80211_radar_detected);
index f311388..a2c2258 100644 (file)
 #include <linux/export.h>
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
+#include "rate.h"
 
 
-void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
-                                        struct ieee80211_supported_band *sband,
-                                        struct ieee80211_vht_cap *vht_cap_ie,
-                                        struct ieee80211_sta_vht_cap *vht_cap)
+void
+ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_supported_band *sband,
+                                   const struct ieee80211_vht_cap *vht_cap_ie,
+                                   struct sta_info *sta)
 {
-       if (WARN_ON_ONCE(!vht_cap))
-               return;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
 
        memset(vht_cap, 0, sizeof(*vht_cap));
 
+       if (!sta->sta.ht_cap.ht_supported)
+               return;
+
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
+       /* A VHT STA must support 40 MHz */
+       if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+               return;
+
        vht_cap->vht_supported = true;
 
        vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info);
@@ -32,4 +40,156 @@ void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        /* Copy peer MCS info, the driver might need them. */
        memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
               sizeof(struct ieee80211_vht_mcs_info));
+
+       switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+       case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+       case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+               sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+               break;
+       default:
+               sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+       }
+
+       sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+}
+
+enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 cap = sta->sta.vht_cap.cap;
+       enum ieee80211_sta_rx_bandwidth bw;
+
+       if (!sta->sta.vht_cap.vht_supported) {
+               bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+                               IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+               goto check_max;
+       }
+
+       switch (sdata->vif.bss_conf.chandef.width) {
+       default:
+               WARN_ON_ONCE(1);
+               /* fall through */
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_40:
+               bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+                               IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
+                               IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) {
+                       bw = IEEE80211_STA_RX_BW_160;
+                       break;
+               }
+               /* fall through */
+       case NL80211_CHAN_WIDTH_80P80:
+               if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
+                               IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) {
+                       bw = IEEE80211_STA_RX_BW_160;
+                       break;
+               }
+               /* fall through */
+       case NL80211_CHAN_WIDTH_80:
+               bw = IEEE80211_STA_RX_BW_80;
+       }
+
+ check_max:
+       if (bw > sta->cur_max_bandwidth)
+               bw = sta->cur_max_bandwidth;
+       return bw;
+}
+
+void ieee80211_sta_set_rx_nss(struct sta_info *sta)
+{
+       u8 ht_rx_nss = 0, vht_rx_nss = 0;
+
+       /* if we received a notification already don't overwrite it */
+       if (sta->sta.rx_nss)
+               return;
+
+       if (sta->sta.ht_cap.ht_supported) {
+               if (sta->sta.ht_cap.mcs.rx_mask[0])
+                       ht_rx_nss++;
+               if (sta->sta.ht_cap.mcs.rx_mask[1])
+                       ht_rx_nss++;
+               if (sta->sta.ht_cap.mcs.rx_mask[2])
+                       ht_rx_nss++;
+               if (sta->sta.ht_cap.mcs.rx_mask[3])
+                       ht_rx_nss++;
+               /* FIXME: consider rx_highest? */
+       }
+
+       if (sta->sta.vht_cap.vht_supported) {
+               int i;
+               u16 rx_mcs_map;
+
+               rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map);
+
+               for (i = 7; i >= 0; i--) {
+                       u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
+
+                       if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+                               vht_rx_nss = i + 1;
+                               break;
+                       }
+               }
+               /* FIXME: consider rx_highest? */
+       }
+
+       ht_rx_nss = max(ht_rx_nss, vht_rx_nss);
+       sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
+}
+
+void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+                                struct sta_info *sta, u8 opmode,
+                                enum ieee80211_band band, bool nss_only)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_supported_band *sband;
+       enum ieee80211_sta_rx_bandwidth new_bw;
+       u32 changed = 0;
+       u8 nss;
+
+       sband = local->hw.wiphy->bands[band];
+
+       /* ignore - no support for BF yet */
+       if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
+               return;
+
+       nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
+       nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
+       nss += 1;
+
+       if (sta->sta.rx_nss != nss) {
+               sta->sta.rx_nss = nss;
+               changed |= IEEE80211_RC_NSS_CHANGED;
+       }
+
+       if (nss_only)
+               goto change;
+
+       switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
+       case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
+               sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
+               break;
+       case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
+               sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
+               break;
+       case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
+               sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+               break;
+       case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
+               sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+               break;
+       }
+
+       new_bw = ieee80211_sta_cur_vht_bw(sta);
+       if (new_bw != sta->sta.bandwidth) {
+               sta->sta.bandwidth = new_bw;
+               changed |= IEEE80211_RC_NSS_CHANGED;
+       }
+
+ change:
+       if (changed)
+               rate_control_rate_update(local, sband, sta, changed);
 }
index 906f00c..afba19c 100644 (file)
@@ -191,6 +191,15 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
 
        /* qos header is 2 bytes */
        *p++ = ack_policy | tid;
-       *p = ieee80211_vif_is_mesh(&sdata->vif) ?
-               (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               /* preserve RSPI and Mesh PS Level bit */
+               *p &= ((IEEE80211_QOS_CTL_RSPI |
+                       IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8);
+
+               /* Nulls don't have a mesh header (frame body) */
+               if (!ieee80211_is_qos_nullfunc(hdr->frame_control))
+                       *p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
+       } else {
+               *p = 0;
+       }
 }
index c175ee8..c7c6d64 100644 (file)
@@ -181,7 +181,6 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       unsigned long flags;
        unsigned int hdrlen;
        int len, tail;
        u8 *pos;
@@ -216,12 +215,12 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
                return 0;
 
        /* Increase IV for the frame */
-       spin_lock_irqsave(&key->u.tkip.txlock, flags);
+       spin_lock(&key->u.tkip.txlock);
        key->u.tkip.tx.iv16++;
        if (key->u.tkip.tx.iv16 == 0)
                key->u.tkip.tx.iv32++;
        pos = ieee80211_tkip_add_iv(pos, key);
-       spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
+       spin_unlock(&key->u.tkip.txlock);
 
        /* hwaccel - with software IV */
        if (info->control.hw_key)
index 199b922..d20c6d3 100644 (file)
@@ -41,7 +41,7 @@ static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
                return -EINVAL;
 
        *val = skb->data[0];
-        skb_pull(skb, 1);
+       skb_pull(skb, 1);
 
        return 0;
 }
@@ -137,16 +137,12 @@ static int mac802154_header_create(struct sk_buff *skb,
        struct ieee802154_addr dev_addr;
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
        int pos = 2;
-       u8 *head;
+       u8 head[MAC802154_FRAME_HARD_HEADER_LEN];
        u16 fc;
 
        if (!daddr)
                return -EINVAL;
 
-       head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
-       if (head == NULL)
-               return -ENOMEM;
-
        head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
        fc = mac_cb_type(skb);
 
@@ -210,7 +206,6 @@ static int mac802154_header_create(struct sk_buff *skb,
        head[1] = fc >> 8;
 
        memcpy(skb_push(skb, pos), head, pos);
-       kfree(head);
 
        return pos;
 }
index 9713e6e..0b779d7 100644 (file)
@@ -605,12 +605,12 @@ int __net_init ip_vs_app_net_init(struct net *net)
        struct netns_ipvs *ipvs = net_ipvs(net);
 
        INIT_LIST_HEAD(&ipvs->app_list);
-       proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
+       proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
        return 0;
 }
 
 void __net_exit ip_vs_app_net_cleanup(struct net *net)
 {
        unregister_ip_vs_app(net, NULL /* all */);
-       proc_net_remove(net, "ip_vs_app");
+       remove_proc_entry("ip_vs_app", net->proc_net);
 }
index 68e368a..9f00db7 100644 (file)
@@ -1291,8 +1291,8 @@ int __net_init ip_vs_conn_net_init(struct net *net)
 
        atomic_set(&ipvs->conn_count, 0);
 
-       proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
-       proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+       proc_create("ip_vs_conn", 0, net->proc_net, &ip_vs_conn_fops);
+       proc_create("ip_vs_conn_sync", 0, net->proc_net, &ip_vs_conn_sync_fops);
        return 0;
 }
 
@@ -1300,8 +1300,8 @@ void __net_exit ip_vs_conn_net_cleanup(struct net *net)
 {
        /* flush all the connection entries first */
        ip_vs_conn_flush(net);
-       proc_net_remove(net, "ip_vs_conn");
-       proc_net_remove(net, "ip_vs_conn_sync");
+       remove_proc_entry("ip_vs_conn", net->proc_net);
+       remove_proc_entry("ip_vs_conn_sync", net->proc_net);
 }
 
 int __init ip_vs_conn_init(void)
index ec664cb..c68198b 100644 (file)
@@ -3800,10 +3800,10 @@ int __net_init ip_vs_control_net_init(struct net *net)
 
        spin_lock_init(&ipvs->tot_stats.lock);
 
-       proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
-       proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
-       proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
-                            &ip_vs_stats_percpu_fops);
+       proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
+       proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops);
+       proc_create("ip_vs_stats_percpu", 0, net->proc_net,
+                   &ip_vs_stats_percpu_fops);
 
        if (ip_vs_control_net_init_sysctl(net))
                goto err;
@@ -3822,9 +3822,9 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
        ip_vs_trash_cleanup(net);
        ip_vs_stop_estimator(net, &ipvs->tot_stats);
        ip_vs_control_net_cleanup_sysctl(net);
-       proc_net_remove(net, "ip_vs_stats_percpu");
-       proc_net_remove(net, "ip_vs_stats");
-       proc_net_remove(net, "ip_vs");
+       remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
+       remove_proc_entry("ip_vs_stats", net->proc_net);
+       remove_proc_entry("ip_vs", net->proc_net);
        free_percpu(ipvs->tot_stats.cpustats);
 }
 
index 746048b..ae8ec6f 100644 (file)
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
        return 1;
 }
 
+static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
+                         unsigned int sctphoff)
+{
+       __u32 crc32;
+       struct sk_buff *iter;
+
+       crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
+       skb_walk_frags(skb, iter)
+               crc32 = sctp_update_cksum((u8 *) iter->data,
+                                         skb_headlen(iter), crc32);
+       sctph->checksum = sctp_end_cksum(crc32);
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 static int
 sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
                  struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
        sctp_sctphdr_t *sctph;
        unsigned int sctphoff = iph->len;
-       struct sk_buff *iter;
-       __be32 crc32;
 
 #ifdef CONFIG_IP_VS_IPV6
        if (cp->af == AF_INET6 && iph->fragoffs)
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
        sctph = (void *) skb_network_header(skb) + sctphoff;
        sctph->source = cp->vport;
 
-       /* Calculate the checksum */
-       crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
-       skb_walk_frags(skb, iter)
-               crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
-                                         crc32);
-       crc32 = sctp_end_cksum(crc32);
-       sctph->checksum = crc32;
+       sctp_nat_csum(skb, sctph, sctphoff);
 
        return 1;
 }
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 {
        sctp_sctphdr_t *sctph;
        unsigned int sctphoff = iph->len;
-       struct sk_buff *iter;
-       __be32 crc32;
 
 #ifdef CONFIG_IP_VS_IPV6
        if (cp->af == AF_INET6 && iph->fragoffs)
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
        sctph = (void *) skb_network_header(skb) + sctphoff;
        sctph->dest = cp->dport;
 
-       /* Calculate the checksum */
-       crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
-       skb_walk_frags(skb, iter)
-               crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
-                                         crc32);
-       crc32 = sctp_end_cksum(crc32);
-       sctph->checksum = crc32;
+       sctp_nat_csum(skb, sctph, sctphoff);
 
        return 1;
 }
index effa10c..44fd10c 100644 (file)
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
                                             GFP_KERNEL);
                        if (!tinfo->buf)
                                goto outtinfo;
+               } else {
+                       tinfo->buf = NULL;
                }
                tinfo->id = id;
 
index bdd3418..3921e5b 100644 (file)
@@ -571,7 +571,8 @@ static int exp_proc_init(struct net *net)
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
 
-       proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
+       proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
+                          &exp_file_ops);
        if (!proc)
                return -ENOMEM;
 #endif /* CONFIG_NF_CONNTRACK_PROCFS */
@@ -581,7 +582,7 @@ static int exp_proc_init(struct net *net)
 static void exp_proc_remove(struct net *net)
 {
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
-       proc_net_remove(net, "nf_conntrack_expect");
+       remove_proc_entry("nf_conntrack_expect", net->proc_net);
 #endif /* CONFIG_NF_CONNTRACK_PROCFS */
 }
 
index c08768d..013cdf6 100644 (file)
@@ -237,7 +237,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
                /* We only allow helper re-assignment of the same sort since
                 * we cannot reallocate the helper extension area.
                 */
-               if (help->helper != helper) {
+               struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
+
+               if (tmp && tmp->help != helper->help) {
                        RCU_INIT_POINTER(help->helper, NULL);
                        goto out;
                }
index d490a30..5d60e04 100644 (file)
@@ -1782,6 +1782,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
                        enum ip_conntrack_events events;
 
+                       if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
+                               return -EINVAL;
+
                        ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
                                                        &rtuple, u3);
                        if (IS_ERR(ct))
index 7936bf7..6bcce40 100644 (file)
@@ -366,7 +366,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
 {
        struct proc_dir_entry *pde;
 
-       pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
+       pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
        if (!pde)
                goto out_nf_conntrack;
 
@@ -377,7 +377,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
        return 0;
 
 out_stat_nf_conntrack:
-       proc_net_remove(net, "nf_conntrack");
+       remove_proc_entry("nf_conntrack", net->proc_net);
 out_nf_conntrack:
        return -ENOMEM;
 }
@@ -385,7 +385,7 @@ out_nf_conntrack:
 static void nf_conntrack_standalone_fini_proc(struct net *net)
 {
        remove_proc_entry("nf_conntrack", net->proc_net_stat);
-       proc_net_remove(net, "nf_conntrack");
+       remove_proc_entry("nf_conntrack", net->proc_net);
 }
 #else
 static int nf_conntrack_standalone_init_proc(struct net *net)
index 7b3a9e5..686c771 100644 (file)
@@ -1323,12 +1323,12 @@ int xt_proto_init(struct net *net, u_int8_t af)
 out_remove_matches:
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
-       proc_net_remove(net, buf);
+       remove_proc_entry(buf, net->proc_net);
 
 out_remove_tables:
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
-       proc_net_remove(net, buf);
+       remove_proc_entry(buf, net->proc_net);
 out:
        return -1;
 #endif
@@ -1342,15 +1342,15 @@ void xt_proto_fini(struct net *net, u_int8_t af)
 
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
-       proc_net_remove(net, buf);
+       remove_proc_entry(buf, net->proc_net);
 
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
-       proc_net_remove(net, buf);
+       remove_proc_entry(buf, net->proc_net);
 
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
-       proc_net_remove(net, buf);
+       remove_proc_entry(buf, net->proc_net);
 #endif /*CONFIG_PROC_FS*/
 }
 EXPORT_SYMBOL_GPL(xt_proto_fini);
index a9d7af9..98218c8 100644 (file)
@@ -867,7 +867,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
        if (!hashlimit_net->ip6t_hashlimit) {
-               proc_net_remove(net, "ipt_hashlimit");
+               remove_proc_entry("ipt_hashlimit", net->proc_net);
                return -ENOMEM;
        }
 #endif
@@ -897,9 +897,9 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
        hashlimit_net->ip6t_hashlimit = NULL;
        mutex_unlock(&hashlimit_mutex);
 
-       proc_net_remove(net, "ipt_hashlimit");
+       remove_proc_entry("ipt_hashlimit", net->proc_net);
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       proc_net_remove(net, "ip6t_hashlimit");
+       remove_proc_entry("ip6t_hashlimit", net->proc_net);
 #endif
 }
 
index 978efc9..31bf233 100644 (file)
@@ -643,7 +643,7 @@ static void __net_exit recent_proc_net_exit(struct net *net)
        recent_net->xt_recent = NULL;
        spin_unlock_bh(&recent_lock);
 
-       proc_net_remove(net, "xt_recent");
+       remove_proc_entry("xt_recent", net->proc_net);
 }
 #else
 static inline int recent_proc_net_init(struct net *net)
index 74827e3..3d55e0c 100644 (file)
@@ -2145,7 +2145,7 @@ static const struct net_proto_family netlink_family_ops = {
 static int __net_init netlink_net_init(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
+       if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
                return -ENOMEM;
 #endif
        return 0;
@@ -2154,7 +2154,7 @@ static int __net_init netlink_net_init(struct net *net)
 static void __net_exit netlink_net_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "netlink");
+       remove_proc_entry("netlink", net->proc_net);
 #endif
 }
 
index 7261eb8..297b07a 100644 (file)
@@ -1452,9 +1452,9 @@ static int __init nr_proto_init(void)
 
        nr_loopback_init();
 
-       proc_net_fops_create(&init_net, "nr", S_IRUGO, &nr_info_fops);
-       proc_net_fops_create(&init_net, "nr_neigh", S_IRUGO, &nr_neigh_fops);
-       proc_net_fops_create(&init_net, "nr_nodes", S_IRUGO, &nr_nodes_fops);
+       proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
+       proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
+       proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
 out:
        return rc;
 fail:
@@ -1482,9 +1482,9 @@ static void __exit nr_exit(void)
 {
        int i;
 
-       proc_net_remove(&init_net, "nr");
-       proc_net_remove(&init_net, "nr_neigh");
-       proc_net_remove(&init_net, "nr_nodes");
+       remove_proc_entry("nr", init_net.proc_net);
+       remove_proc_entry("nr_neigh", init_net.proc_net);
+       remove_proc_entry("nr_nodes", init_net.proc_net);
        nr_loopback_clear();
 
        nr_rt_free();
index 85bc75c..746f5a2 100644 (file)
@@ -549,14 +549,13 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
                pr_err("No LLCP device\n");
                return -ENODEV;
        }
+       if (gb_len < 3)
+               return -EINVAL;
 
        memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
        memcpy(local->remote_gb, gb, gb_len);
        local->remote_gb_len = gb_len;
 
-       if (local->remote_gb == NULL || local->remote_gb_len == 0)
-               return -ENODEV;
-
        if (memcmp(local->remote_gb, llcp_magic, 3)) {
                pr_err("MAC does not support LLCP\n");
                return -EINVAL;
index d8c13a9..9dc537d 100644 (file)
@@ -301,7 +301,7 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
        struct sk_buff *segs, *nskb;
        int err;
 
-       segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+       segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
 
index a9327e2..670cbc3 100644 (file)
 /* Must be called with rcu_read_lock. */
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 {
-       if (unlikely(!vport)) {
-               kfree_skb(skb);
-               return;
-       }
+       if (unlikely(!vport))
+               goto error;
+
+       if (unlikely(skb_warn_if_lro(skb)))
+               goto error;
 
        /* Make our own copy of the packet.  Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
        skb_push(skb, ETH_HLEN);
        ovs_vport_receive(vport, skb);
+       return;
+
+error:
+       kfree_skb(skb);
 }
 
 /* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
                goto error;
        }
 
-       if (unlikely(skb_warn_if_lro(skb)))
-               goto error;
-
        skb->dev = netdev_vport->dev;
        len = skb->len;
        dev_queue_xmit(skb);
index e639645..c7bfeff 100644 (file)
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
 
        packet_flush_mclist(sk);
 
-       memset(&req_u, 0, sizeof(req_u));
-
-       if (po->rx_ring.pg_vec)
+       if (po->rx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 0);
+       }
 
-       if (po->tx_ring.pg_vec)
+       if (po->tx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 1);
+       }
 
        fanout_release(sk);
 
@@ -3826,7 +3828,7 @@ static int __net_init packet_net_init(struct net *net)
        mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);
 
-       if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
+       if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
                return -ENOMEM;
 
        return 0;
@@ -3834,7 +3836,7 @@ static int __net_init packet_net_init(struct net *net)
 
 static void __net_exit packet_net_exit(struct net *net)
 {
-       proc_net_remove(net, "packet");
+       remove_proc_entry("packet", net->proc_net);
 }
 
 static struct pernet_operations packet_net_ops = {
index 5bf6341..45a7df6 100644 (file)
@@ -320,7 +320,7 @@ static int __net_init phonet_init_net(struct net *net)
 {
        struct phonet_net *pnn = phonet_pernet(net);
 
-       if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops))
+       if (!proc_create("phonet", 0, net->proc_net, &pn_sock_seq_fops))
                return -ENOMEM;
 
        INIT_LIST_HEAD(&pnn->pndevs.list);
@@ -331,7 +331,7 @@ static int __net_init phonet_init_net(struct net *net)
 
 static void __net_exit phonet_exit_net(struct net *net)
 {
-       proc_net_remove(net, "phonet");
+       remove_proc_entry("phonet", net->proc_net);
 }
 
 static struct pernet_operations phonet_net_ops = {
@@ -348,7 +348,7 @@ int __init phonet_device_init(void)
        if (err)
                return err;
 
-       proc_net_fops_create(&init_net, "pnresource", 0, &pn_res_seq_fops);
+       proc_create("pnresource", 0, init_net.proc_net, &pn_res_seq_fops);
        register_netdevice_notifier(&phonet_device_notifier);
        err = phonet_netlink_register();
        if (err)
@@ -361,7 +361,7 @@ void phonet_device_exit(void)
        rtnl_unregister_all(PF_PHONET);
        unregister_netdevice_notifier(&phonet_device_notifier);
        unregister_pernet_subsys(&phonet_net_ops);
-       proc_net_remove(&init_net, "pnresource");
+       remove_proc_entry("pnresource", init_net.proc_net);
 }
 
 int phonet_route_add(struct net_device *dev, u8 daddr)
index c4719ce..b768fe9 100644 (file)
@@ -1575,10 +1575,13 @@ static int __init rose_proto_init(void)
 
        rose_add_loopback_neigh();
 
-       proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
-       proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
-       proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
-       proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
+       proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
+       proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
+                   &rose_neigh_fops);
+       proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
+                   &rose_nodes_fops);
+       proc_create("rose_routes", S_IRUGO, init_net.proc_net,
+                   &rose_routes_fops);
 out:
        return rc;
 fail:
@@ -1605,10 +1608,10 @@ static void __exit rose_exit(void)
 {
        int i;
 
-       proc_net_remove(&init_net, "rose");
-       proc_net_remove(&init_net, "rose_neigh");
-       proc_net_remove(&init_net, "rose_nodes");
-       proc_net_remove(&init_net, "rose_routes");
+       remove_proc_entry("rose", init_net.proc_net);
+       remove_proc_entry("rose_neigh", init_net.proc_net);
+       remove_proc_entry("rose_nodes", init_net.proc_net);
+       remove_proc_entry("rose_routes", init_net.proc_net);
        rose_loopback_clear();
 
        rose_rt_free();
index 5b0fd29..e61aa60 100644 (file)
@@ -839,8 +839,9 @@ static int __init af_rxrpc_init(void)
        }
 
 #ifdef CONFIG_PROC_FS
-       proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops);
-       proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops);
+       proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
+       proc_create("rxrpc_conns", 0, init_net.proc_net,
+                   &rxrpc_connection_seq_fops);
 #endif
        return 0;
 
@@ -878,8 +879,8 @@ static void __exit af_rxrpc_exit(void)
 
        _debug("flush scheduled work");
        flush_workqueue(rxrpc_workqueue);
-       proc_net_remove(&init_net, "rxrpc_conns");
-       proc_net_remove(&init_net, "rxrpc_calls");
+       remove_proc_entry("rxrpc_conns", init_net.proc_net);
+       remove_proc_entry("rxrpc_calls", init_net.proc_net);
        destroy_workqueue(rxrpc_workqueue);
        kmem_cache_destroy(rxrpc_call_jar);
        _leave("");
index 0fb9e3f..e0f6de6 100644 (file)
@@ -207,10 +207,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_ipt *ipt = a->priv;
        struct xt_action_param par;
 
-       if (skb_cloned(skb)) {
-               if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                       return TC_ACT_UNSPEC;
-       }
+       if (skb_unclone(skb, GFP_ATOMIC))
+               return TC_ACT_UNSPEC;
 
        spin_lock(&ipt->tcf_lock);
 
index 0c3fadd..7ed78c9 100644 (file)
@@ -131,8 +131,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
        int i, munged = 0;
        unsigned int off;
 
-       if (skb_cloned(skb) &&
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                return p->tcf_action;
 
        off = skb_network_offset(skb);
index 8dbd695..823463a 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-#define L2T(p, L)   qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
+struct tcf_police {
+       struct tcf_common       common;
+       int                     tcfp_result;
+       u32                     tcfp_ewma_rate;
+       s64                     tcfp_burst;
+       u32                     tcfp_mtu;
+       s64                     tcfp_toks;
+       s64                     tcfp_ptoks;
+       s64                     tcfp_mtu_ptoks;
+       s64                     tcfp_t_c;
+       struct psched_ratecfg   rate;
+       bool                    rate_present;
+       struct psched_ratecfg   peak;
+       bool                    peak_present;
+};
+#define to_police(pc)  \
+       container_of(pc, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -108,10 +123,6 @@ static void tcf_police_destroy(struct tcf_police *p)
                        write_unlock_bh(&police_lock);
                        gen_kill_estimator(&p->tcf_bstats,
                                           &p->tcf_rate_est);
-                       if (p->tcfp_R_tab)
-                               qdisc_put_rtab(p->tcfp_R_tab);
-                       if (p->tcfp_P_tab)
-                               qdisc_put_rtab(p->tcfp_P_tab);
                        /*
                         * gen_estimator est_timer() might access p->tcf_lock
                         * or bstats, wait a RCU grace period before freeing p
@@ -212,26 +223,36 @@ override:
        }
 
        /* No failure allowed after this point */
-       if (R_tab != NULL) {
-               qdisc_put_rtab(police->tcfp_R_tab);
-               police->tcfp_R_tab = R_tab;
+       police->tcfp_mtu = parm->mtu;
+       if (police->tcfp_mtu == 0) {
+               police->tcfp_mtu = ~0;
+               if (R_tab)
+                       police->tcfp_mtu = 255 << R_tab->rate.cell_log;
+       }
+       if (R_tab) {
+               police->rate_present = true;
+               psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
+               qdisc_put_rtab(R_tab);
+       } else {
+               police->rate_present = false;
        }
-       if (P_tab != NULL) {
-               qdisc_put_rtab(police->tcfp_P_tab);
-               police->tcfp_P_tab = P_tab;
+       if (P_tab) {
+               police->peak_present = true;
+               psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
+               qdisc_put_rtab(P_tab);
+       } else {
+               police->peak_present = false;
        }
 
        if (tb[TCA_POLICE_RESULT])
                police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
-       police->tcfp_toks = police->tcfp_burst = parm->burst;
-       police->tcfp_mtu = parm->mtu;
-       if (police->tcfp_mtu == 0) {
-               police->tcfp_mtu = ~0;
-               if (police->tcfp_R_tab)
-                       police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
+       police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
+       police->tcfp_toks = police->tcfp_burst;
+       if (police->peak_present) {
+               police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
+                                                            police->tcfp_mtu);
+               police->tcfp_ptoks = police->tcfp_mtu_ptoks;
        }
-       if (police->tcfp_P_tab)
-               police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
        police->tcf_action = parm->action;
 
        if (tb[TCA_POLICE_AVRATE])
@@ -241,7 +262,7 @@ override:
        if (ret != ACT_P_CREATED)
                return ret;
 
-       police->tcfp_t_c = psched_get_time();
+       police->tcfp_t_c = ktime_to_ns(ktime_get());
        police->tcf_index = parm->index ? parm->index :
                tcf_hash_new_index(&police_idx_gen, &police_hash_info);
        h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -287,9 +308,9 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
 {
        struct tcf_police *police = a->priv;
-       psched_time_t now;
-       long toks;
-       long ptoks = 0;
+       s64 now;
+       s64 toks;
+       s64 ptoks = 0;
 
        spin_lock(&police->tcf_lock);
 
@@ -305,24 +326,25 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
        }
 
        if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
-               if (police->tcfp_R_tab == NULL) {
+               if (!police->rate_present) {
                        spin_unlock(&police->tcf_lock);
                        return police->tcfp_result;
                }
 
-               now = psched_get_time();
-               toks = psched_tdiff_bounded(now, police->tcfp_t_c,
-                                           police->tcfp_burst);
-               if (police->tcfp_P_tab) {
+               now = ktime_to_ns(ktime_get());
+               toks = min_t(s64, now - police->tcfp_t_c,
+                            police->tcfp_burst);
+               if (police->peak_present) {
                        ptoks = toks + police->tcfp_ptoks;
-                       if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
-                               ptoks = (long)L2T_P(police, police->tcfp_mtu);
-                       ptoks -= L2T_P(police, qdisc_pkt_len(skb));
+                       if (ptoks > police->tcfp_mtu_ptoks)
+                               ptoks = police->tcfp_mtu_ptoks;
+                       ptoks -= (s64) psched_l2t_ns(&police->peak,
+                                                    qdisc_pkt_len(skb));
                }
                toks += police->tcfp_toks;
-               if (toks > (long)police->tcfp_burst)
+               if (toks > police->tcfp_burst)
                        toks = police->tcfp_burst;
-               toks -= L2T(police, qdisc_pkt_len(skb));
+               toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb));
                if ((toks|ptoks) >= 0) {
                        police->tcfp_t_c = now;
                        police->tcfp_toks = toks;
@@ -348,15 +370,15 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
                .index = police->tcf_index,
                .action = police->tcf_action,
                .mtu = police->tcfp_mtu,
-               .burst = police->tcfp_burst,
+               .burst = PSCHED_NS2TICKS(police->tcfp_burst),
                .refcnt = police->tcf_refcnt - ref,
                .bindcnt = police->tcf_bindcnt - bind,
        };
 
-       if (police->tcfp_R_tab)
-               opt.rate = police->tcfp_R_tab->rate;
-       if (police->tcfp_P_tab)
-               opt.peakrate = police->tcfp_P_tab->rate;
+       if (police->rate_present)
+               opt.rate.rate = psched_ratecfg_getrate(&police->rate);
+       if (police->peak_present)
+               opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
        if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
                goto nla_put_failure;
        if (police->tcfp_result &&
index d84f7e7..a181b48 100644 (file)
@@ -493,7 +493,7 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
 {
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))
@@ -502,10 +502,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
        qdisc_throttled(wd->qdisc);
 
        hrtimer_start(&wd->timer,
-                     ns_to_ktime(PSCHED_TICKS2NS(expires)),
+                     ns_to_ktime(expires),
                      HRTIMER_MODE_ABS);
 }
-EXPORT_SYMBOL(qdisc_watchdog_schedule);
+EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
@@ -1768,7 +1768,7 @@ static int __net_init psched_net_init(struct net *net)
 {
        struct proc_dir_entry *e;
 
-       e = proc_net_fops_create(net, "psched", 0, &psched_fops);
+       e = proc_create("psched", 0, net->proc_net, &psched_fops);
        if (e == NULL)
                return -ENOMEM;
 
@@ -1777,7 +1777,7 @@ static int __net_init psched_net_init(struct net *net)
 
 static void __net_exit psched_net_exit(struct net *net)
 {
-       proc_net_remove(net, "psched");
+       remove_proc_entry("psched", net->proc_net);
 }
 #else
 static int __net_init psched_net_init(struct net *net)
index 5d81a44..ffad481 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 
@@ -896,3 +897,39 @@ void dev_shutdown(struct net_device *dev)
 
        WARN_ON(timer_pending(&dev->watchdog_timer));
 }
+
+void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+{
+       u64 factor;
+       u64 mult;
+       int shift;
+
+       r->rate_bps = rate << 3;
+       r->shift = 0;
+       r->mult = 1;
+       /*
+        * Calibrate mult, shift so that token counting is accurate
+        * for smallest packet size (64 bytes).  Token (time in ns) is
+        * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps.  It will
+        * work as long as the smallest packet transfer time can be
+        * accurately represented in nanosec.
+        */
+       if (r->rate_bps > 0) {
+               /*
+                * Higher shift gives better accuracy.  Find the largest
+                * shift such that mult fits in 32 bits.
+                */
+               for (shift = 0; shift < 16; shift++) {
+                       r->shift = shift;
+                       factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
+                       mult = div64_u64(factor, r->rate_bps);
+                       if (mult > UINT_MAX)
+                               break;
+               }
+
+               r->shift = shift - 1;
+               factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
+               r->mult = div64_u64(factor, r->rate_bps);
+       }
+}
+EXPORT_SYMBOL(psched_ratecfg_precompute);
index 51561ea..03c2692 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <net/netlink.h>
+#include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 
 /* HTB algorithm.
@@ -71,12 +72,6 @@ enum htb_cmode {
        HTB_CAN_SEND            /* class can send */
 };
 
-struct htb_rate_cfg {
-       u64 rate_bps;
-       u32 mult;
-       u32 shift;
-};
-
 /* interior & leaf nodes; props specific to leaves are marked L: */
 struct htb_class {
        struct Qdisc_class_common common;
@@ -124,8 +119,8 @@ struct htb_class {
        int filter_cnt;
 
        /* token bucket parameters */
-       struct htb_rate_cfg rate;
-       struct htb_rate_cfg ceil;
+       struct psched_ratecfg rate;
+       struct psched_ratecfg ceil;
        s64 buffer, cbuffer;    /* token bucket depth/rate */
        psched_tdiff_t mbuffer; /* max wait time */
        s64 tokens, ctokens;    /* current number of tokens */
@@ -168,45 +163,6 @@ struct htb_sched {
        struct work_struct work;
 };
 
-static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len)
-{
-       return ((u64)len * r->mult) >> r->shift;
-}
-
-static void htb_precompute_ratedata(struct htb_rate_cfg *r)
-{
-       u64 factor;
-       u64 mult;
-       int shift;
-
-       r->shift = 0;
-       r->mult = 1;
-       /*
-        * Calibrate mult, shift so that token counting is accurate
-        * for smallest packet size (64 bytes).  Token (time in ns) is
-        * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps.  It will
-        * work as long as the smallest packet transfer time can be
-        * accurately represented in nanosec.
-        */
-       if (r->rate_bps > 0) {
-               /*
-                * Higher shift gives better accuracy.  Find the largest
-                * shift such that mult fits in 32 bits.
-                */
-               for (shift = 0; shift < 16; shift++) {
-                       r->shift = shift;
-                       factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-                       mult = div64_u64(factor, r->rate_bps);
-                       if (mult > UINT_MAX)
-                               break;
-               }
-
-               r->shift = shift - 1;
-               factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-               r->mult = div64_u64(factor, r->rate_bps);
-       }
-}
-
 /* find class in global hash table using given handle */
 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
@@ -632,7 +588,7 @@ static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 
        if (toks > cl->buffer)
                toks = cl->buffer;
-       toks -= (s64) l2t_ns(&cl->rate, bytes);
+       toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;
 
@@ -645,7 +601,7 @@ static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 
        if (toks > cl->cbuffer)
                toks = cl->cbuffer;
-       toks -= (s64) l2t_ns(&cl->ceil, bytes);
+       toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;
 
@@ -1134,10 +1090,10 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 
        memset(&opt, 0, sizeof(opt));
 
-       opt.rate.rate = cl->rate.rate_bps >> 3;
-       opt.buffer = cl->buffer;
-       opt.ceil.rate = cl->ceil.rate_bps >> 3;
-       opt.cbuffer = cl->cbuffer;
+       opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
+       opt.buffer = PSCHED_NS2TICKS(cl->buffer);
+       opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
+       opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
        opt.level = cl->level;
@@ -1459,8 +1415,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                cl->parent = parent;
 
                /* set class to be in HTB_CAN_SEND state */
-               cl->tokens = hopt->buffer;
-               cl->ctokens = hopt->cbuffer;
+               cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
+               cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
                cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
                cl->t_c = psched_get_time();
                cl->cmode = HTB_CAN_SEND;
@@ -1503,17 +1459,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        cl->prio = TC_HTB_NUMPRIO - 1;
        }
 
-       cl->buffer = hopt->buffer;
-       cl->cbuffer = hopt->cbuffer;
-
-       cl->rate.rate_bps = (u64)hopt->rate.rate << 3;
-       cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3;
-
-       htb_precompute_ratedata(&cl->rate);
-       htb_precompute_ratedata(&cl->ceil);
+       psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
+       psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
 
-       cl->buffer = hopt->buffer << PSCHED_SHIFT;
-       cl->cbuffer = hopt->buffer << PSCHED_SHIFT;
+       cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
+       cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
 
        sch_tree_unlock(sch);
 
index 298c0dd..3d2acc7 100644 (file)
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                if (q->rate) {
                        struct sk_buff_head *list = &sch->q;
 
-                       delay += packet_len_2_sched_time(skb->len, q);
-
                        if (!skb_queue_empty(list)) {
                                /*
-                                * Last packet in queue is reference point (now).
-                                * First packet in queue is already in flight,
-                                * calculate this time bonus and substract
+                                * Last packet in queue is reference point (now),
+                                * calculate this time bonus and subtract
                                 * from delay.
                                 */
-                               delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+                               delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
+                               delay = max_t(psched_tdiff_t, 0, delay);
                                now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
                        }
+
+                       delay += packet_len_2_sched_time(skb->len, q);
                }
 
                cb->time_to_send = now + delay;
index 4b056c1..c8388f3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <net/netlink.h>
+#include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 
 
 struct tbf_sched_data {
 /* Parameters */
        u32             limit;          /* Maximal length of backlog: bytes */
-       u32             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
-       u32             mtu;
+       s64             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
+       s64             mtu;
        u32             max_size;
-       struct qdisc_rate_table *R_tab;
-       struct qdisc_rate_table *P_tab;
+       struct psched_ratecfg rate;
+       struct psched_ratecfg peak;
+       bool peak_present;
 
 /* Variables */
-       long    tokens;                 /* Current number of B tokens */
-       long    ptokens;                /* Current number of P tokens */
-       psched_time_t   t_c;            /* Time check-point */
+       s64     tokens;                 /* Current number of B tokens */
+       s64     ptokens;                /* Current number of P tokens */
+       s64     t_c;                    /* Time check-point */
        struct Qdisc    *qdisc;         /* Inner qdisc, default - bfifo queue */
        struct qdisc_watchdog watchdog; /* Watchdog timer */
 };
 
-#define L2T(q, L)   qdisc_l2t((q)->R_tab, L)
-#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
-
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -156,24 +155,24 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
        skb = q->qdisc->ops->peek(q->qdisc);
 
        if (skb) {
-               psched_time_t now;
-               long toks;
-               long ptoks = 0;
+               s64 now;
+               s64 toks;
+               s64 ptoks = 0;
                unsigned int len = qdisc_pkt_len(skb);
 
-               now = psched_get_time();
-               toks = psched_tdiff_bounded(now, q->t_c, q->buffer);
+               now = ktime_to_ns(ktime_get());
+               toks = min_t(s64, now - q->t_c, q->buffer);
 
-               if (q->P_tab) {
+               if (q->peak_present) {
                        ptoks = toks + q->ptokens;
-                       if (ptoks > (long)q->mtu)
+                       if (ptoks > q->mtu)
                                ptoks = q->mtu;
-                       ptoks -= L2T_P(q, len);
+                       ptoks -= (s64) psched_l2t_ns(&q->peak, len);
                }
                toks += q->tokens;
-               if (toks > (long)q->buffer)
+               if (toks > q->buffer)
                        toks = q->buffer;
-               toks -= L2T(q, len);
+               toks -= (s64) psched_l2t_ns(&q->rate, len);
 
                if ((toks|ptoks) >= 0) {
                        skb = qdisc_dequeue_peeked(q->qdisc);
@@ -189,8 +188,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                        return skb;
                }
 
-               qdisc_watchdog_schedule(&q->watchdog,
-                                       now + max_t(long, -toks, -ptoks));
+               qdisc_watchdog_schedule_ns(&q->watchdog,
+                                          now + max_t(long, -toks, -ptoks));
 
                /* Maybe we have a shorter packet in the queue,
                   which can be sent now. It sounds cool,
@@ -214,7 +213,7 @@ static void tbf_reset(struct Qdisc *sch)
 
        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
-       q->t_c = psched_get_time();
+       q->t_c = ktime_to_ns(ktime_get());
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
        qdisc_watchdog_cancel(&q->watchdog);
@@ -293,14 +292,19 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                q->qdisc = child;
        }
        q->limit = qopt->limit;
-       q->mtu = qopt->mtu;
+       q->mtu = PSCHED_TICKS2NS(qopt->mtu);
        q->max_size = max_size;
-       q->buffer = qopt->buffer;
+       q->buffer = PSCHED_TICKS2NS(qopt->buffer);
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
 
-       swap(q->R_tab, rtab);
-       swap(q->P_tab, ptab);
+       psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
+       if (ptab) {
+               psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
+               q->peak_present = true;
+       } else {
+               q->peak_present = false;
+       }
 
        sch_tree_unlock(sch);
        err = 0;
@@ -319,7 +323,7 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
        if (opt == NULL)
                return -EINVAL;
 
-       q->t_c = psched_get_time();
+       q->t_c = ktime_to_ns(ktime_get());
        qdisc_watchdog_init(&q->watchdog, sch);
        q->qdisc = &noop_qdisc;
 
@@ -331,12 +335,6 @@ static void tbf_destroy(struct Qdisc *sch)
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_watchdog_cancel(&q->watchdog);
-
-       if (q->P_tab)
-               qdisc_put_rtab(q->P_tab);
-       if (q->R_tab)
-               qdisc_put_rtab(q->R_tab);
-
        qdisc_destroy(q->qdisc);
 }
 
@@ -352,13 +350,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        opt.limit = q->limit;
-       opt.rate = q->R_tab->rate;
-       if (q->P_tab)
-               opt.peakrate = q->P_tab->rate;
+       opt.rate.rate = psched_ratecfg_getrate(&q->rate);
+       if (q->peak_present)
+               opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
-       opt.mtu = q->mtu;
-       opt.buffer = q->buffer;
+       opt.mtu = PSCHED_NS2TICKS(q->mtu);
+       opt.buffer = PSCHED_NS2TICKS(q->buffer);
        if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
 
index 7521d94..cf48528 100644 (file)
@@ -3,8 +3,8 @@
 #
 
 menuconfig IP_SCTP
-       tristate "The SCTP Protocol (EXPERIMENTAL)"
-       depends on INET && EXPERIMENTAL
+       tristate "The SCTP Protocol"
+       depends on INET
        depends on IPV6 || IPV6=n
        select CRYPTO
        select CRYPTO_HMAC
index 159b9bc..ba1dfc3 100644 (file)
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
                return;
 
        if (atomic_dec_and_test(&key->refcnt)) {
-               kfree(key);
+               kzfree(key);
                SCTP_DBG_OBJCNT_DEC(keys);
        }
 }
@@ -200,27 +200,28 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector(
        struct sctp_auth_bytes *new;
        __u32   len;
        __u32   offset = 0;
+       __u16   random_len, hmacs_len, chunks_len = 0;
 
-       len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length);
-        if (chunks)
-               len += ntohs(chunks->param_hdr.length);
+       random_len = ntohs(random->param_hdr.length);
+       hmacs_len = ntohs(hmacs->param_hdr.length);
+       if (chunks)
+               chunks_len = ntohs(chunks->param_hdr.length);
 
-       new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp);
+       len = random_len + hmacs_len + chunks_len;
+
+       new = sctp_auth_create_key(len, gfp);
        if (!new)
                return NULL;
 
-       new->len = len;
-
-       memcpy(new->data, random, ntohs(random->param_hdr.length));
-       offset += ntohs(random->param_hdr.length);
+       memcpy(new->data, random, random_len);
+       offset += random_len;
 
        if (chunks) {
-               memcpy(new->data + offset, chunks,
-                       ntohs(chunks->param_hdr.length));
-               offset += ntohs(chunks->param_hdr.length);
+               memcpy(new->data + offset, chunks, chunks_len);
+               offset += chunks_len;
        }
 
-       memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length));
+       memcpy(new->data + offset, hmacs, hmacs_len);
 
        return new;
 }
@@ -350,8 +351,8 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
        secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector,
                                            gfp);
 out:
-       kfree(local_key_vector);
-       kfree(peer_key_vector);
+       sctp_auth_key_put(local_key_vector);
+       sctp_auth_key_put(peer_key_vector);
 
        return secret;
 }
index 17a001b..73aad3d 100644 (file)
@@ -151,9 +151,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
 
        /* Initialize the secret key used with cookie. */
-       get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
-       ep->last_key = ep->current_key = 0;
-       ep->key_changed_at = jiffies;
+       get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
 
        /* SCTP-AUTH extensions*/
        INIT_LIST_HEAD(&ep->endpoint_shared_keys);
@@ -271,6 +269,8 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);
 
+       memset(ep->secret_key, 0, sizeof(ep->secret_key));
+
        /* Remove and free the port */
        if (sctp_sk(ep->base.sk)->bind_hash)
                sctp_put_port(ep->base.sk);
index f3f0f4d..391a245 100644 (file)
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
         */
        rcu_read_lock();
        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-               if (!laddr->valid && laddr->state != SCTP_ADDR_SRC)
+               if (!laddr->valid)
                        continue;
-               if ((laddr->a.sa.sa_family == AF_INET6) &&
+               if ((laddr->state == SCTP_ADDR_SRC) &&
+                   (laddr->a.sa.sa_family == AF_INET6) &&
                    (scope <= sctp_scope(&laddr->a))) {
                        bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
                        if (!baddr || (matchlen < bmatchlen)) {
index 5f7518d..ad0dba8 100644 (file)
@@ -122,12 +122,12 @@ static const struct file_operations sctpprobe_fops = {
        .llseek = noop_llseek,
 };
 
-sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
-                                    const struct sctp_endpoint *ep,
-                                    const struct sctp_association *asoc,
-                                    const sctp_subtype_t type,
-                                    void *arg,
-                                    sctp_cmd_seq_t *commands)
+static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
+                                           const struct sctp_endpoint *ep,
+                                           const struct sctp_association *asoc,
+                                           const sctp_subtype_t type,
+                                           void *arg,
+                                           sctp_cmd_seq_t *commands)
 {
        struct sctp_transport *sp;
        static __u32 lcwnd = 0;
@@ -183,13 +183,20 @@ static __init int sctpprobe_init(void)
 {
        int ret = -ENOMEM;
 
+       /* Warning: if the function signature of sctp_sf_eat_sack_6_2
+        * has been changed, you also have to change the signature of
+        * jsctp_sf_eat_sack, otherwise you end up right here!
+        */
+       BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
+                                jsctp_sf_eat_sack) == 0);
+
        init_waitqueue_head(&sctpw.wait);
        spin_lock_init(&sctpw.lock);
        if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
                return ret;
 
-       if (!proc_net_fops_create(&init_net, procname, S_IRUSR,
-                                 &sctpprobe_fops))
+       if (!proc_create(procname, S_IRUSR, init_net.proc_net,
+                        &sctpprobe_fops))
                goto free_kfifo;
 
        ret = register_jprobe(&sctp_recv_probe);
@@ -201,7 +208,7 @@ static __init int sctpprobe_init(void)
        return 0;
 
 remove_proc:
-       proc_net_remove(&init_net, procname);
+       remove_proc_entry(procname, init_net.proc_net);
 free_kfifo:
        kfifo_free(&sctpw.fifo);
        return ret;
@@ -210,7 +217,7 @@ free_kfifo:
 static __exit void sctpprobe_exit(void)
 {
        kfifo_free(&sctpw.fifo);
-       proc_net_remove(&init_net, procname);
+       remove_proc_entry(procname, init_net.proc_net);
        unregister_jprobe(&sctp_recv_probe);
 }
 
index e1c5fc2..a193f3b 100644 (file)
@@ -1589,8 +1589,6 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
        struct sctp_signed_cookie *cookie;
        struct scatterlist sg;
        int headersize, bodysize;
-       unsigned int keylen;
-       char *key;
 
        /* Header size is static data prior to the actual cookie, including
         * any padding.
@@ -1650,12 +1648,11 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 
                /* Sign the message.  */
                sg_init_one(&sg, &cookie->c, bodysize);
-               keylen = SCTP_SECRET_SIZE;
-               key = (char *)ep->secret_key[ep->current_key];
                desc.tfm = sctp_sk(ep->base.sk)->hmac;
                desc.flags = 0;
 
-               if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+               if (crypto_hash_setkey(desc.tfm, ep->secret_key,
+                                      sizeof(ep->secret_key)) ||
                    crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
                        goto free_cookie;
        }
@@ -1682,8 +1679,7 @@ struct sctp_association *sctp_unpack_cookie(
        int headersize, bodysize, fixed_size;
        __u8 *digest = ep->digest;
        struct scatterlist sg;
-       unsigned int keylen, len;
-       char *key;
+       unsigned int len;
        sctp_scope_t scope;
        struct sk_buff *skb = chunk->skb;
        struct timeval tv;
@@ -1718,34 +1714,21 @@ struct sctp_association *sctp_unpack_cookie(
                goto no_hmac;
 
        /* Check the signature.  */
-       keylen = SCTP_SECRET_SIZE;
        sg_init_one(&sg, bear_cookie, bodysize);
-       key = (char *)ep->secret_key[ep->current_key];
        desc.tfm = sctp_sk(ep->base.sk)->hmac;
        desc.flags = 0;
 
        memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
-       if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+       if (crypto_hash_setkey(desc.tfm, ep->secret_key,
+                              sizeof(ep->secret_key)) ||
            crypto_hash_digest(&desc, &sg, bodysize, digest)) {
                *error = -SCTP_IERROR_NOMEM;
                goto fail;
        }
 
        if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
-               /* Try the previous key. */
-               key = (char *)ep->secret_key[ep->last_key];
-               memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
-               if (crypto_hash_setkey(desc.tfm, key, keylen) ||
-                   crypto_hash_digest(&desc, &sg, bodysize, digest)) {
-                       *error = -SCTP_IERROR_NOMEM;
-                       goto fail;
-               }
-
-               if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
-                       /* Yikes!  Still bad signature! */
-                       *error = -SCTP_IERROR_BAD_SIG;
-                       goto fail;
-               }
+               *error = -SCTP_IERROR_BAD_SIG;
+               goto fail;
        }
 
 no_hmac:
index 9e65758..cedd9bf 100644 (file)
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
        ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
 out:
-       kfree(authkey);
+       kzfree(authkey);
        return ret;
 }
 
index 5c4d82c..ee0d029 100644 (file)
@@ -2837,7 +2837,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
        }
 
        ifr = compat_alloc_user_space(buf_size);
-       rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
+       rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
 
        if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
                return -EFAULT;
@@ -2861,12 +2861,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
                        offsetof(struct ethtool_rxnfc, fs.ring_cookie));
 
                if (copy_in_user(rxnfc, compat_rxnfc,
-                                (void *)(&rxnfc->fs.m_ext + 1) -
-                                (void *)rxnfc) ||
+                                (void __user *)(&rxnfc->fs.m_ext + 1) -
+                                (void __user *)rxnfc) ||
                    copy_in_user(&rxnfc->fs.ring_cookie,
                                 &compat_rxnfc->fs.ring_cookie,
-                                (void *)(&rxnfc->fs.location + 1) -
-                                (void *)&rxnfc->fs.ring_cookie) ||
+                                (void __user *)(&rxnfc->fs.location + 1) -
+                                (void __user *)&rxnfc->fs.ring_cookie) ||
                    copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
                                 sizeof(rxnfc->rule_cnt)))
                        return -EFAULT;
@@ -2878,12 +2878,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 
        if (convert_out) {
                if (copy_in_user(compat_rxnfc, rxnfc,
-                                (const void *)(&rxnfc->fs.m_ext + 1) -
-                                (const void *)rxnfc) ||
+                                (const void __user *)(&rxnfc->fs.m_ext + 1) -
+                                (const void __user *)rxnfc) ||
                    copy_in_user(&compat_rxnfc->fs.ring_cookie,
                                 &rxnfc->fs.ring_cookie,
-                                (const void *)(&rxnfc->fs.location + 1) -
-                                (const void *)&rxnfc->fs.ring_cookie) ||
+                                (const void __user *)(&rxnfc->fs.location + 1) -
+                                (const void __user *)&rxnfc->fs.ring_cookie) ||
                    copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
                                 sizeof(rxnfc->rule_cnt)))
                        return -EFAULT;
index bfa3171..fb20f25 100644 (file)
@@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
        list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
+static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
+{
+       struct list_head *q = &queue->tasks[queue->priority];
+       struct rpc_task *task;
+
+       if (!list_empty(q)) {
+               task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+               if (task->tk_owner == queue->owner)
+                       list_move_tail(&task->u.tk_wait.list, q);
+       }
+}
+
 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 {
-       queue->priority = priority;
+       if (queue->priority != priority) {
+               /* Fairness: rotate the list when changing priority */
+               rpc_rotate_queue_owner(queue);
+               queue->priority = priority;
+       }
 }
 
 static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
index 0a148c9..0f679df 100644 (file)
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
 }
 
 /*
- * See net/ipv6/datagram.c : datagram_recv_ctl
+ * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
  */
 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                                     struct cmsghdr *cmh)
index 54f89f9..2655c9f 100644 (file)
@@ -774,6 +774,7 @@ void tipc_bclink_init(void)
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+       spin_lock_init(&bcbearer->bearer.lock);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
index 9b4e483..a9622b6 100644 (file)
@@ -43,7 +43,8 @@
 #define SS_LISTENING   -1      /* socket is listening */
 #define SS_READY       -2      /* socket is connectionless */
 
-#define OVERLOAD_LIMIT_BASE    10000
+#define CONN_OVERLOAD_LIMIT    ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
+                               SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
 
 struct tipc_sock {
@@ -129,19 +130,6 @@ static void advance_rx_queue(struct sock *sk)
 }
 
 /**
- * discard_rx_queue - discard all buffers in socket receive queue
- *
- * Caller must hold socket lock
- */
-static void discard_rx_queue(struct sock *sk)
-{
-       struct sk_buff *buf;
-
-       while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
-               kfree_skb(buf);
-}
-
-/**
  * reject_rx_queue - reject all buffers in socket receive queue
  *
  * Caller must hold socket lock
@@ -215,7 +203,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 
        sock_init_data(sock, sk);
        sk->sk_backlog_rcv = backlog_rcv;
-       sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        tipc_sk(sk)->p = tp_ptr;
@@ -292,7 +279,7 @@ static int release(struct socket *sock)
        res = tipc_deleteport(tport->ref);
 
        /* Discard any remaining (connection-based) messages in receive queue */
-       discard_rx_queue(sk);
+       __skb_queue_purge(&sk->sk_receive_queue);
 
        /* Reject any messages that accumulated in backlog queue */
        sock->state = SS_DISCONNECTING;
@@ -516,8 +503,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
        if (unlikely((m->msg_namelen < sizeof(*dest)) ||
                     (dest->family != AF_TIPC)))
                return -EINVAL;
-       if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-           (m->msg_iovlen > (unsigned int)INT_MAX))
+       if (total_len > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
 
        if (iocb)
@@ -625,8 +611,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
        if (unlikely(dest))
                return send_msg(iocb, sock, m, total_len);
 
-       if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-           (m->msg_iovlen > (unsigned int)INT_MAX))
+       if (total_len > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
 
        if (iocb)
@@ -711,8 +696,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
                goto exit;
        }
 
-       if ((total_len > (unsigned int)INT_MAX) ||
-           (m->msg_iovlen > (unsigned int)INT_MAX)) {
+       if (total_len > (unsigned int)INT_MAX) {
                res = -EMSGSIZE;
                goto exit;
        }
@@ -1155,34 +1139,6 @@ static void tipc_data_ready(struct sock *sk, int len)
 }
 
 /**
- * rx_queue_full - determine if receive queue can accept another message
- * @msg: message to be added to queue
- * @queue_size: current size of queue
- * @base: nominal maximum size of queue
- *
- * Returns 1 if queue is unable to accept message, 0 otherwise
- */
-static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
-{
-       u32 threshold;
-       u32 imp = msg_importance(msg);
-
-       if (imp == TIPC_LOW_IMPORTANCE)
-               threshold = base;
-       else if (imp == TIPC_MEDIUM_IMPORTANCE)
-               threshold = base * 2;
-       else if (imp == TIPC_HIGH_IMPORTANCE)
-               threshold = base * 100;
-       else
-               return 0;
-
-       if (msg_connected(msg))
-               threshold *= 4;
-
-       return queue_size >= threshold;
-}
-
-/**
  * filter_connect - Handle all incoming messages for a connection-based socket
  * @tsock: TIPC socket
  * @msg: message
@@ -1260,6 +1216,36 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
 }
 
 /**
+ * rcvbuf_limit - get proper overload limit of socket receive queue
+ * @sk: socket
+ * @buf: message
+ *
+ * For all connection oriented messages, irrespective of importance,
+ * the default overload value (i.e. 67MB) is set as limit.
+ *
+ * For all connectionless messages, by default new queue limits are
+ * as below:
+ *
+ * TIPC_LOW_IMPORTANCE       (5MB)
+ * TIPC_MEDIUM_IMPORTANCE    (10MB)
+ * TIPC_HIGH_IMPORTANCE      (20MB)
+ * TIPC_CRITICAL_IMPORTANCE  (40MB)
+ *
+ * Returns overload limit according to corresponding message importance
+ */
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       unsigned int limit;
+
+       if (msg_connected(msg))
+               limit = CONN_OVERLOAD_LIMIT;
+       else
+               limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
+       return limit;
+}
+
+/**
  * filter_rcv - validate incoming message
  * @sk: socket
  * @buf: message
@@ -1275,7 +1261,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_msg *msg = buf_msg(buf);
-       u32 recv_q_len;
+       unsigned int limit = rcvbuf_limit(sk, buf);
        u32 res = TIPC_OK;
 
        /* Reject message if it is wrong sort of message for socket */
@@ -1292,15 +1278,13 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
        }
 
        /* Reject message if there isn't room to queue it */
-       recv_q_len = skb_queue_len(&sk->sk_receive_queue);
-       if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
-               if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
-                       return TIPC_ERR_OVERLOAD;
-       }
+       if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
+               return TIPC_ERR_OVERLOAD;
 
-       /* Enqueue message (finally!) */
+       /* Enqueue message */
        TIPC_SKB_CB(buf)->handle = 0;
        __skb_queue_tail(&sk->sk_receive_queue, buf);
+       skb_set_owner_r(buf, sk);
 
        sk->sk_data_ready(sk, 0);
        return TIPC_OK;
@@ -1349,7 +1333,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
        if (!sock_owned_by_user(sk)) {
                res = filter_rcv(sk, buf);
        } else {
-               if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
+               if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
                        res = TIPC_ERR_OVERLOAD;
                else
                        res = TIPC_OK;
@@ -1583,6 +1567,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
        } else {
                __skb_dequeue(&sk->sk_receive_queue);
                __skb_queue_head(&new_sk->sk_receive_queue, buf);
+               skb_set_owner_r(buf, new_sk);
        }
        release_sock(new_sk);
 
@@ -1637,7 +1622,7 @@ restart:
        case SS_DISCONNECTING:
 
                /* Discard any unreceived messages */
-               discard_rx_queue(sk);
+               __skb_queue_purge(&sk->sk_receive_queue);
 
                /* Wake up anyone sleeping in poll */
                sk->sk_state_change(sk);
index 0c61236..87d2842 100644 (file)
@@ -2402,7 +2402,7 @@ static int __net_init unix_net_init(struct net *net)
                goto out;
 
 #ifdef CONFIG_PROC_FS
-       if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
+       if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
                unix_sysctl_unregister(net);
                goto out;
        }
@@ -2415,7 +2415,7 @@ out:
 static void __net_exit unix_net_exit(struct net *net)
 {
        unix_sysctl_unregister(net);
-       proc_net_remove(net, "unix");
+       remove_proc_entry("unix", net->proc_net);
 }
 
 static struct pernet_operations unix_net_ops = {
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
new file mode 100644 (file)
index 0000000..b5fa7e4
--- /dev/null
@@ -0,0 +1,28 @@
+#
+# Vsock protocol
+#
+
+config VSOCKETS
+       tristate "Virtual Socket protocol"
+       help
+         Virtual Socket Protocol is a socket protocol similar to TCP/IP
+         allowing communication between Virtual Machines and hypervisor
+         or host.
+
+         You should also select one or more hypervisor-specific transports
+         below.
+
+         To compile this driver as a module, choose M here: the module
+         will be called vsock. If unsure, say N.
+
+config VMWARE_VMCI_VSOCKETS
+       tristate "VMware VMCI transport for Virtual Sockets"
+       depends on VSOCKETS && VMWARE_VMCI
+       help
+         This module implements a VMCI transport for Virtual Sockets.
+
+         Enable this transport if your Virtual Machine runs on a VMware
+         hypervisor.
+
+         To compile this driver as a module, choose M here: the module
+         will be called vmw_vsock_vmci_transport. If unsure, say N.
diff --git a/net/vmw_vsock/Makefile b/net/vmw_vsock/Makefile
new file mode 100644 (file)
index 0000000..2ce52d7
--- /dev/null
@@ -0,0 +1,7 @@
+obj-$(CONFIG_VSOCKETS) += vsock.o
+obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
+
+vsock-y += af_vsock.o vsock_addr.o
+
+vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \
+       vmci_transport_notify_qstate.o
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
new file mode 100644 (file)
index 0000000..ca511c4
--- /dev/null
@@ -0,0 +1,2012 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+/* Implementation notes:
+ *
+ * - There are two kinds of sockets: those created by user action (such as
+ * calling socket(2)) and those created by incoming connection request packets.
+ *
+ * - There are two "global" tables, one for bound sockets (sockets that have
+ * specified an address that they are responsible for) and one for connected
+ * sockets (sockets that have established a connection with another socket).
+ * These tables are "global" in that all sockets on the system are placed
+ * within them. - Note, though, that the bound table contains an extra entry
+ * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
+ * that list. The bound table is used solely for lookup of sockets when packets
+ * are received and that's not necessary for SOCK_DGRAM sockets since we create
+ * a datagram handle for each and need not perform a lookup.  Keeping SOCK_DGRAM
+ * sockets out of the bound hash buckets will reduce the chance of collisions
+ * when looking for SOCK_STREAM sockets and prevents us from having to check the
+ * socket type in the hash table lookups.
+ *
+ * - Sockets created by user action will either be "client" sockets that
+ * initiate a connection or "server" sockets that listen for connections; we do
+ * not support simultaneous connects (two "client" sockets connecting).
+ *
+ * - "Server" sockets are referred to as listener sockets throughout this
+ * implementation because they are in the SS_LISTEN state.  When a connection
+ * request is received (the second kind of socket mentioned above), we create a
+ * new socket and refer to it as a pending socket.  These pending sockets are
+ * placed on the pending connection list of the listener socket.  When future
+ * packets are received for the address the listener socket is bound to, we
+ * check if the source of the packet is from one that has an existing pending
+ * connection.  If it does, we process the packet for the pending socket.  When
+ * that socket reaches the connected state, it is removed from the listener
+ * socket's pending list and enqueued in the listener socket's accept queue.
+ * Callers of accept(2) will accept connected sockets from the listener socket's
+ * accept queue.  If the socket cannot be accepted for some reason then it is
+ * marked rejected.  Once the connection is accepted, it is owned by the user
+ * process and the responsibility for cleanup falls with that user process.
+ *
+ * - It is possible that these pending sockets will never reach the connected
+ * state; in fact, we may never receive another packet after the connection
+ * request.  Because of this, we must schedule a cleanup function to run in the
+ * future, after some amount of time passes where a connection should have been
+ * established.  This function ensures that the socket is off all lists so it
+ * cannot be retrieved, then drops all references to the socket so it is cleaned
+ * up (sock_put() -> sk_free() -> our sk_destruct implementation).  Note this
+ * function will also cleanup rejected sockets, those that reach the connected
+ * state but leave it before they have been accepted.
+ *
+ * - Sockets created by user action will be cleaned up when the user process
+ * calls close(2), causing our release implementation to be called. Our release
+ * implementation will perform some cleanup then drop the last reference so our
+ * sk_destruct implementation is invoked.  Our sk_destruct implementation will
+ * perform additional cleanup that's common for both types of sockets.
+ *
+ * - A socket's reference count is what ensures that the structure won't be
+ * freed.  Each entry in a list (such as the "global" bound and connected tables
+ * and the listener socket's pending list and connected queue) ensures a
+ * reference.  When we defer work until process context and pass a socket as our
+ * argument, we must ensure the reference count is increased to ensure the
+ * socket isn't freed before the function is run; the deferred function will
+ * then drop the reference.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/cred.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/smp.h>
+#include <linux/socket.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+
+#include "af_vsock.h"
+
+static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+static void vsock_sk_destruct(struct sock *sk);
+static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+/* Protocol family. */
+static struct proto vsock_proto = {
+       .name = "AF_VSOCK",
+       .owner = THIS_MODULE,
+       .obj_size = sizeof(struct vsock_sock),
+};
+
+/* The default peer timeout indicates how long we will wait for a peer response
+ * to a control message.
+ */
+#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
+
+#define SS_LISTEN 255
+
+static const struct vsock_transport *transport;
+static DEFINE_MUTEX(vsock_register_mutex);
+
+/**** EXPORTS ****/
+
+/* Get the ID of the local context.  This is transport dependent. */
+
+int vm_sockets_get_local_cid(void)
+{
+       return transport->get_local_cid();
+}
+EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
+
+/**** UTILS ****/
+
+/* Each bound VSocket is stored in the bind hash table and each connected
+ * VSocket is stored in the connected hash table.
+ *
+ * Unbound sockets are all put on the same list attached to the end of the hash
+ * table (vsock_unbound_sockets).  Bound sockets are added to the hash table in
+ * the bucket that their local address hashes to (vsock_bound_sockets(addr)
+ * represents the list that addr hashes to).
+ *
+ * Specifically, we initialize the vsock_bind_table array to a size of
+ * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
+ * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
+ * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash function
+ * mods with VSOCK_HASH_SIZE - 1 to ensure this.
+ */
+#define VSOCK_HASH_SIZE         251
+#define MAX_PORT_RETRIES        24
+
+#define VSOCK_HASH(addr)        ((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
+#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
+#define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])
+
+/* XXX This can probably be implemented in a better way. */
+#define VSOCK_CONN_HASH(src, dst)                              \
+       (((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
+#define vsock_connected_sockets(src, dst)              \
+       (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
+#define vsock_connected_sockets_vsk(vsk)                               \
+       vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
+
+static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
+static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
+static DEFINE_SPINLOCK(vsock_table_lock);
+
+static __init void vsock_init_tables(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
+               INIT_LIST_HEAD(&vsock_bind_table[i]);
+
+       for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
+               INIT_LIST_HEAD(&vsock_connected_table[i]);
+}
+
+static void __vsock_insert_bound(struct list_head *list,
+                                struct vsock_sock *vsk)
+{
+       sock_hold(&vsk->sk);
+       list_add(&vsk->bound_table, list);
+}
+
+static void __vsock_insert_connected(struct list_head *list,
+                                    struct vsock_sock *vsk)
+{
+       sock_hold(&vsk->sk);
+       list_add(&vsk->connected_table, list);
+}
+
+static void __vsock_remove_bound(struct vsock_sock *vsk)
+{
+       list_del_init(&vsk->bound_table);
+       sock_put(&vsk->sk);
+}
+
+static void __vsock_remove_connected(struct vsock_sock *vsk)
+{
+       list_del_init(&vsk->connected_table);
+       sock_put(&vsk->sk);
+}
+
+static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
+{
+       struct vsock_sock *vsk;
+
+       list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
+               if (vsock_addr_equals_addr_any(addr, &vsk->local_addr))
+                       return sk_vsock(vsk);
+
+       return NULL;
+}
+
+static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
+                                                 struct sockaddr_vm *dst)
+{
+       struct vsock_sock *vsk;
+
+       list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
+                           connected_table) {
+               if (vsock_addr_equals_addr(src, &vsk->remote_addr)
+                   && vsock_addr_equals_addr(dst, &vsk->local_addr)) {
+                       return sk_vsock(vsk);
+               }
+       }
+
+       return NULL;
+}
+
+static bool __vsock_in_bound_table(struct vsock_sock *vsk)
+{
+       return !list_empty(&vsk->bound_table);
+}
+
+static bool __vsock_in_connected_table(struct vsock_sock *vsk)
+{
+       return !list_empty(&vsk->connected_table);
+}
+
+static void vsock_insert_unbound(struct vsock_sock *vsk)
+{
+       spin_lock_bh(&vsock_table_lock);
+       __vsock_insert_bound(vsock_unbound_sockets, vsk);
+       spin_unlock_bh(&vsock_table_lock);
+}
+
+void vsock_insert_connected(struct vsock_sock *vsk)
+{
+       struct list_head *list = vsock_connected_sockets(
+               &vsk->remote_addr, &vsk->local_addr);
+
+       spin_lock_bh(&vsock_table_lock);
+       __vsock_insert_connected(list, vsk);
+       spin_unlock_bh(&vsock_table_lock);
+}
+EXPORT_SYMBOL_GPL(vsock_insert_connected);
+
+void vsock_remove_bound(struct vsock_sock *vsk)
+{
+       spin_lock_bh(&vsock_table_lock);
+       __vsock_remove_bound(vsk);
+       spin_unlock_bh(&vsock_table_lock);
+}
+EXPORT_SYMBOL_GPL(vsock_remove_bound);
+
+void vsock_remove_connected(struct vsock_sock *vsk)
+{
+       spin_lock_bh(&vsock_table_lock);
+       __vsock_remove_connected(vsk);
+       spin_unlock_bh(&vsock_table_lock);
+}
+EXPORT_SYMBOL_GPL(vsock_remove_connected);
+
+struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
+{
+       struct sock *sk;
+
+       spin_lock_bh(&vsock_table_lock);
+       sk = __vsock_find_bound_socket(addr);
+       if (sk)
+               sock_hold(sk);
+
+       spin_unlock_bh(&vsock_table_lock);
+
+       return sk;
+}
+EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
+
+struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
+                                        struct sockaddr_vm *dst)
+{
+       struct sock *sk;
+
+       spin_lock_bh(&vsock_table_lock);
+       sk = __vsock_find_connected_socket(src, dst);
+       if (sk)
+               sock_hold(sk);
+
+       spin_unlock_bh(&vsock_table_lock);
+
+       return sk;
+}
+EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
+
+static bool vsock_in_bound_table(struct vsock_sock *vsk)
+{
+       bool ret;
+
+       spin_lock_bh(&vsock_table_lock);
+       ret = __vsock_in_bound_table(vsk);
+       spin_unlock_bh(&vsock_table_lock);
+
+       return ret;
+}
+
+static bool vsock_in_connected_table(struct vsock_sock *vsk)
+{
+       bool ret;
+
+       spin_lock_bh(&vsock_table_lock);
+       ret = __vsock_in_connected_table(vsk);
+       spin_unlock_bh(&vsock_table_lock);
+
+       return ret;
+}
+
+void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
+{
+       int i;
+
+       spin_lock_bh(&vsock_table_lock);	/* fn runs in atomic context */
+
+       for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
+               struct vsock_sock *vsk;
+               list_for_each_entry(vsk, &vsock_connected_table[i],
+                                   connected_table)	/* no ';' here */
+                       fn(sk_vsock(vsk));
+       }
+
+       spin_unlock_bh(&vsock_table_lock);
+}
+EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
+
+void vsock_add_pending(struct sock *listener, struct sock *pending)
+{
+       struct vsock_sock *vlistener;
+       struct vsock_sock *vpending;
+
+       vlistener = vsock_sk(listener);
+       vpending = vsock_sk(pending);
+
+       sock_hold(pending);
+       sock_hold(listener);
+       list_add_tail(&vpending->pending_links, &vlistener->pending_links);
+}
+EXPORT_SYMBOL_GPL(vsock_add_pending);
+
+void vsock_remove_pending(struct sock *listener, struct sock *pending)
+{
+       struct vsock_sock *vpending = vsock_sk(pending);
+
+       list_del_init(&vpending->pending_links);
+       sock_put(listener);
+       sock_put(pending);
+}
+EXPORT_SYMBOL_GPL(vsock_remove_pending);
+
+void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
+{
+       struct vsock_sock *vlistener;
+       struct vsock_sock *vconnected;
+
+       vlistener = vsock_sk(listener);
+       vconnected = vsock_sk(connected);
+
+       sock_hold(connected);
+       sock_hold(listener);
+       list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
+}
+EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
+
+static struct sock *vsock_dequeue_accept(struct sock *listener)
+{
+       struct vsock_sock *vlistener;
+       struct vsock_sock *vconnected;
+
+       vlistener = vsock_sk(listener);
+
+       if (list_empty(&vlistener->accept_queue))
+               return NULL;
+
+       vconnected = list_entry(vlistener->accept_queue.next,
+                               struct vsock_sock, accept_queue);
+
+       list_del_init(&vconnected->accept_queue);
+       sock_put(listener);
+       /* The caller will need a reference on the connected socket so we let
+        * it call sock_put().
+        */
+
+       return sk_vsock(vconnected);
+}
+
+static bool vsock_is_accept_queue_empty(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+       return list_empty(&vsk->accept_queue);
+}
+
+static bool vsock_is_pending(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+       return !list_empty(&vsk->pending_links);
+}
+
+static int vsock_send_shutdown(struct sock *sk, int mode)
+{
+       return transport->shutdown(vsock_sk(sk), mode);
+}
+
+void vsock_pending_work(struct work_struct *work)
+{
+       struct sock *sk;
+       struct sock *listener;
+       struct vsock_sock *vsk;
+       bool cleanup;
+
+       vsk = container_of(work, struct vsock_sock, dwork.work);
+       sk = sk_vsock(vsk);
+       listener = vsk->listener;
+       cleanup = true; /* assume sk was never accepted and must be freed */
+
+       lock_sock(listener); /* lock order: listener first, then child sk */
+       lock_sock(sk);
+
+       if (vsock_is_pending(sk)) {
+               vsock_remove_pending(listener, sk);
+       } else if (!vsk->rejected) {
+               /* We are not on the pending list and accept() did not reject
+                * us, so we must have been accepted by our user process.  We
+                * just need to drop our references to the sockets and be on
+                * our way.
+                */
+               cleanup = false;
+               goto out;
+       }
+
+       listener->sk_ack_backlog--; /* sk no longer counts as pending */
+
+       /* We need to remove ourself from the global connected sockets list so
+        * incoming packets can't find this socket, and to reduce the reference
+        * count.
+        */
+       if (vsock_in_connected_table(vsk))
+               vsock_remove_connected(vsk);
+
+       sk->sk_state = SS_FREE; /* connection never completed */
+
+out:
+       release_sock(sk);
+       release_sock(listener);
+       if (cleanup)
+               sock_put(sk); /* NOTE(review): confirm which hold this pairs with */
+
+       sock_put(sk); /* ref taken when this work was deferred */
+       sock_put(listener); /* ref taken when this work was deferred */
+}
+EXPORT_SYMBOL_GPL(vsock_pending_work);
+
+/**** SOCKET OPERATIONS ****/
+
+static int __vsock_bind_stream(struct vsock_sock *vsk,
+                              struct sockaddr_vm *addr)
+{
+       static u32 port = LAST_RESERVED_PORT + 1;
+       struct sockaddr_vm new_addr;
+
+       vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
+
+       if (addr->svm_port == VMADDR_PORT_ANY) {
+               bool found = false;
+               unsigned int i;
+
+               for (i = 0; i < MAX_PORT_RETRIES; i++) {
+                       if (port <= LAST_RESERVED_PORT)
+                               port = LAST_RESERVED_PORT + 1;
+
+                       new_addr.svm_port = port++;
+
+                       if (!__vsock_find_bound_socket(&new_addr)) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (!found)
+                       return -EADDRNOTAVAIL;
+       } else {
+               /* If port is in reserved range, ensure caller
+                * has necessary privileges.
+                */
+               if (addr->svm_port <= LAST_RESERVED_PORT &&
+                   !capable(CAP_NET_BIND_SERVICE)) {
+                       return -EACCES;
+               }
+
+               if (__vsock_find_bound_socket(&new_addr))
+                       return -EADDRINUSE;
+       }
+
+       vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
+
+       /* Remove stream sockets from the unbound list and add them to the hash
+        * table for easy lookup by its address.  The unbound list is simply an
+        * extra entry at the end of the hash table, a trick used by AF_UNIX.
+        */
+       __vsock_remove_bound(vsk);
+       __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
+
+       return 0;
+}
+
+static int __vsock_bind_dgram(struct vsock_sock *vsk,
+                             struct sockaddr_vm *addr)
+{
+       return transport->dgram_bind(vsk, addr);
+}
+
+static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+       u32 cid;
+       int retval;
+
+       /* First ensure this socket isn't already bound. */
+       if (vsock_addr_bound(&vsk->local_addr))
+               return -EINVAL;
+
+       /* Now bind to the provided address or select appropriate values if
+        * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
+        * like AF_INET prevents binding to a non-local IP address (in most
+        * cases), we only allow binding to the local CID.
+        */
+       cid = transport->get_local_cid();
+       if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
+               return -EADDRNOTAVAIL;
+
+       switch (sk->sk_socket->type) {
+       case SOCK_STREAM:
+               spin_lock_bh(&vsock_table_lock);
+               retval = __vsock_bind_stream(vsk, addr);
+               spin_unlock_bh(&vsock_table_lock);
+               break;
+
+       case SOCK_DGRAM:
+               retval = __vsock_bind_dgram(vsk, addr);
+               break;
+
+       default:
+               retval = -EINVAL;
+               break;
+       }
+
+       return retval;
+}
+
+struct sock *__vsock_create(struct net *net,
+                           struct socket *sock,
+                           struct sock *parent,
+                           gfp_t priority,
+                           unsigned short type)
+{
+       struct sock *sk;
+       struct vsock_sock *psk;
+       struct vsock_sock *vsk;
+
+       sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
+       if (!sk)
+               return NULL;
+
+       sock_init_data(sock, sk);
+
+       /* sk->sk_type is normally set in sock_init_data, but only if sock is
+        * non-NULL. We make sure that our sockets always have a type by
+        * setting it here if needed.
+        */
+       if (!sock)
+               sk->sk_type = type;
+
+       vsk = vsock_sk(sk);
+       vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+       vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+
+       sk->sk_destruct = vsock_sk_destruct;
+       sk->sk_backlog_rcv = vsock_queue_rcv_skb;
+       sk->sk_state = 0;
+       sock_reset_flag(sk, SOCK_DONE);
+
+       INIT_LIST_HEAD(&vsk->bound_table);
+       INIT_LIST_HEAD(&vsk->connected_table);
+       vsk->listener = NULL;
+       INIT_LIST_HEAD(&vsk->pending_links);
+       INIT_LIST_HEAD(&vsk->accept_queue);
+       vsk->rejected = false;
+       vsk->sent_request = false;
+       vsk->ignore_connecting_rst = false;
+       vsk->peer_shutdown = 0;
+
+       psk = parent ? vsock_sk(parent) : NULL;
+       if (parent) {
+               vsk->trusted = psk->trusted;
+               vsk->owner = get_cred(psk->owner);
+               vsk->connect_timeout = psk->connect_timeout;
+       } else {
+               vsk->trusted = capable(CAP_NET_ADMIN);
+               vsk->owner = get_current_cred();
+               vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
+       }
+
+       if (transport->init(vsk, psk) < 0) {
+               sk_free(sk);
+               return NULL;
+       }
+
+       if (sock)
+               vsock_insert_unbound(vsk);
+
+       return sk;
+}
+EXPORT_SYMBOL_GPL(__vsock_create);
+
+static void __vsock_release(struct sock *sk)
+{
+       if (sk) {
+               struct sk_buff *skb;
+               struct sock *pending;
+               struct vsock_sock *vsk;
+
+               vsk = vsock_sk(sk);
+               pending = NULL; /* Compiler warning. */
+
+               if (vsock_in_bound_table(vsk))
+                       vsock_remove_bound(vsk);
+
+               if (vsock_in_connected_table(vsk))
+                       vsock_remove_connected(vsk);
+
+               transport->release(vsk);
+
+               lock_sock(sk);
+               sock_orphan(sk);
+               sk->sk_shutdown = SHUTDOWN_MASK;
+
+               while ((skb = skb_dequeue(&sk->sk_receive_queue)))
+                       kfree_skb(skb);
+
+               /* Clean up any sockets that never were accepted. */
+               while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+                       __vsock_release(pending);
+                       sock_put(pending);
+               }
+
+               release_sock(sk);
+               sock_put(sk);
+       }
+}
+
+static void vsock_sk_destruct(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       transport->destruct(vsk);
+
+       /* When clearing these addresses, there's no need to set the family and
+        * possibly register the address family with the kernel.
+        */
+       vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+       vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+
+       put_cred(vsk->owner);
+}
+
+static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+       int err;
+
+       err = sock_queue_rcv_skb(sk, skb);
+       if (err)
+               kfree_skb(skb);
+
+       return err;
+}
+
+s64 vsock_stream_has_data(struct vsock_sock *vsk)
+{
+       return transport->stream_has_data(vsk);
+}
+EXPORT_SYMBOL_GPL(vsock_stream_has_data);
+
+s64 vsock_stream_has_space(struct vsock_sock *vsk)
+{
+       return transport->stream_has_space(vsk);
+}
+EXPORT_SYMBOL_GPL(vsock_stream_has_space);
+
+static int vsock_release(struct socket *sock)
+{
+       __vsock_release(sock->sk);
+       sock->sk = NULL;
+       sock->state = SS_FREE;
+
+       return 0;
+}
+
+static int
+vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+       int err;
+       struct sock *sk;
+       struct sockaddr_vm *vm_addr;
+
+       sk = sock->sk;
+
+       if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
+               return -EINVAL;
+
+       lock_sock(sk);
+       err = __vsock_bind(sk, vm_addr);
+       release_sock(sk);
+
+       return err;
+}
+
+static int vsock_getname(struct socket *sock,
+                        struct sockaddr *addr, int *addr_len, int peer)
+{
+       int err;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       struct sockaddr_vm *vm_addr;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+       err = 0;
+
+       lock_sock(sk);
+
+       if (peer) {
+               if (sock->state != SS_CONNECTED) {
+                       err = -ENOTCONN;
+                       goto out;
+               }
+               vm_addr = &vsk->remote_addr;
+       } else {
+               vm_addr = &vsk->local_addr;
+       }
+
+       if (!vm_addr) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* sys_getsockname() and sys_getpeername() pass us a
+        * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
+        * that macro is defined in socket.c instead of .h, so we hardcode its
+        * value here.
+        */
+       BUILD_BUG_ON(sizeof(*vm_addr) > 128);
+       memcpy(addr, vm_addr, sizeof(*vm_addr));
+       *addr_len = sizeof(*vm_addr);
+
+out:
+       release_sock(sk);
+       return err;
+}
+
+static int vsock_shutdown(struct socket *sock, int mode)
+{
+       int err;
+       struct sock *sk;
+
+       /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
+        * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
+        * here like the other address families do.  Note also that the
+        * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
+        * which is what we want.
+        */
+       mode++;
+
+       if ((mode & ~SHUTDOWN_MASK) || !mode)
+               return -EINVAL;
+
+       /* If this is a STREAM socket and it is not connected then bail out
+        * immediately.  If it is a DGRAM socket then we must first kick the
+        * socket so that it wakes up from any sleeping calls, for example
+        * recv(), and then afterwards return the error.
+        */
+
+       sk = sock->sk;
+       if (sock->state == SS_UNCONNECTED) {
+               err = -ENOTCONN;
+               if (sk->sk_type == SOCK_STREAM)
+                       return err;
+       } else {
+               sock->state = SS_DISCONNECTING;
+               err = 0;
+       }
+
+       /* Receive and send shutdowns are treated alike. */
+       mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
+       if (mode) {
+               lock_sock(sk);
+               sk->sk_shutdown |= mode;
+               sk->sk_state_change(sk);
+               release_sock(sk);
+
+               if (sk->sk_type == SOCK_STREAM) {
+                       sock_reset_flag(sk, SOCK_DONE);
+                       vsock_send_shutdown(sk, mode);
+               }
+       }
+
+       return err;
+}
+
+static unsigned int vsock_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
+{
+       struct sock *sk;
+       unsigned int mask;
+       struct vsock_sock *vsk;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
+
+       if (sk->sk_err)
+               /* Signify that there has been an error on this socket. */
+               mask |= POLLERR;
+
+       /* INET sockets treat local write shutdown and peer write shutdown as a
+        * case of POLLHUP set.
+        */
+       if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
+           ((sk->sk_shutdown & SEND_SHUTDOWN) &&
+            (vsk->peer_shutdown & SEND_SHUTDOWN))) {
+               mask |= POLLHUP;
+       }
+
+       if (sk->sk_shutdown & RCV_SHUTDOWN ||
+           vsk->peer_shutdown & SEND_SHUTDOWN) {
+               mask |= POLLRDHUP;
+       }
+
+       if (sock->type == SOCK_DGRAM) {
+               /* For datagram sockets we can read if there is something in
+                * the queue and write as long as the socket isn't shutdown for
+                * sending.
+                */
+               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+                   (sk->sk_shutdown & RCV_SHUTDOWN)) {
+                       mask |= POLLIN | POLLRDNORM;
+               }
+
+               if (!(sk->sk_shutdown & SEND_SHUTDOWN))
+                       mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+       } else if (sock->type == SOCK_STREAM) {
+               lock_sock(sk);
+
+               /* Listening sockets that have connections in their accept
+                * queue can be read.
+                */
+               if (sk->sk_state == SS_LISTEN
+                   && !vsock_is_accept_queue_empty(sk))
+                       mask |= POLLIN | POLLRDNORM;
+
+               /* If there is something in the queue then we can read. */
+               if (transport->stream_is_active(vsk) &&
+                   !(sk->sk_shutdown & RCV_SHUTDOWN)) {
+                       bool data_ready_now = false;
+                       int ret = transport->notify_poll_in(
+                                       vsk, 1, &data_ready_now);
+                       if (ret < 0) {
+                               mask |= POLLERR;
+                       } else {
+                               if (data_ready_now)
+                                       mask |= POLLIN | POLLRDNORM;
+
+                       }
+               }
+
+               /* Sockets whose connections have been closed, reset, or
+                * terminated should also be considered read, and we check the
+                * shutdown flag for that.
+                */
+               if (sk->sk_shutdown & RCV_SHUTDOWN ||
+                   vsk->peer_shutdown & SEND_SHUTDOWN) {
+                       mask |= POLLIN | POLLRDNORM;
+               }
+
+               /* Connected sockets that can produce data can be written. */
+               if (sk->sk_state == SS_CONNECTED) {
+                       if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+                               bool space_avail_now = false;
+                               int ret = transport->notify_poll_out(
+                                               vsk, 1, &space_avail_now);
+                               if (ret < 0) {
+                                       mask |= POLLERR;
+                               } else {
+                                       if (space_avail_now)
+                                               /* Remove POLLWRBAND since INET
+                                                * sockets are not setting it.
+                                                */
+                                               mask |= POLLOUT | POLLWRNORM;
+
+                               }
+                       }
+               }
+
+               /* Simulate INET socket poll behaviors, which sets
+                * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
+                * but local send is not shutdown.
+                */
+               if (sk->sk_state == SS_UNCONNECTED) {
+                       if (!(sk->sk_shutdown & SEND_SHUTDOWN))
+                               mask |= POLLOUT | POLLWRNORM;
+
+               }
+
+               release_sock(sk);
+       }
+
+       return mask;
+}
+
+/* Datagram sendmsg(): hand one datagram to the registered transport.
+ *
+ * The destination is taken from msg->msg_name when one is supplied,
+ * otherwise from the socket's connected remote address.  MSG_OOB is not
+ * supported and, per the comment below, the call never blocks.  Returns
+ * the transport's enqueue result (bytes queued) or a negative errno.
+ */
+static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
+                              struct msghdr *msg, size_t len)
+{
+       int err;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       struct sockaddr_vm *remote_addr;
+
+       if (msg->msg_flags & MSG_OOB)
+               return -EOPNOTSUPP;
+
+       /* For now, MSG_DONTWAIT is always assumed... */
+       err = 0;
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       lock_sock(sk);
+
+       /* Autobind to an ephemeral local address if not yet bound. */
+       if (!vsock_addr_bound(&vsk->local_addr)) {
+               struct sockaddr_vm local_addr;
+
+               vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+               err = __vsock_bind(sk, &local_addr);
+               if (err != 0)
+                       goto out;
+
+       }
+
+       /* If the provided message contains an address, use that.  Otherwise
+        * fall back on the socket's remote handle (if it has been connected).
+        */
+       if (msg->msg_name &&
+           vsock_addr_cast(msg->msg_name, msg->msg_namelen,
+                           &remote_addr) == 0) {
+               /* Ensure this address is of the right type and is a valid
+                * destination.
+                */
+
+               /* VMADDR_CID_ANY means "this host": resolve it to our CID. */
+               if (remote_addr->svm_cid == VMADDR_CID_ANY)
+                       remote_addr->svm_cid = transport->get_local_cid();
+
+               if (!vsock_addr_bound(remote_addr)) {
+                       err = -EINVAL;
+                       goto out;
+               }
+       } else if (sock->state == SS_CONNECTED) {
+               remote_addr = &vsk->remote_addr;
+
+               if (remote_addr->svm_cid == VMADDR_CID_ANY)
+                       remote_addr->svm_cid = transport->get_local_cid();
+
+               /* XXX Should connect() or this function ensure remote_addr is
+                * bound?
+                */
+               if (!vsock_addr_bound(&vsk->remote_addr)) {
+                       err = -EINVAL;
+                       goto out;
+               }
+       } else {
+               /* Neither an explicit address nor a connected peer. */
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* Let the transport veto destinations it cannot reach. */
+       if (!transport->dgram_allow(remote_addr->svm_cid,
+                                   remote_addr->svm_port)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
+
+out:
+       release_sock(sk);
+       return err;
+}
+
+/* Datagram connect(): record a default remote address so later sends may
+ * omit msg->msg_name.  Connecting with AF_UNSPEC dissolves the
+ * association and returns the socket to SS_UNCONNECTED, mirroring UDP.
+ */
+static int vsock_dgram_connect(struct socket *sock,
+                              struct sockaddr *addr, int addr_len, int flags)
+{
+       int err;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       struct sockaddr_vm *remote_addr;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       err = vsock_addr_cast(addr, addr_len, &remote_addr);
+       /* NOTE(review): this dereference assumes vsock_addr_cast() sets
+        * remote_addr even when it returns -EAFNOSUPPORT — confirm in
+        * vsock_addr.c.
+        */
+       if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
+               lock_sock(sk);
+               vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
+                               VMADDR_PORT_ANY);
+               sock->state = SS_UNCONNECTED;
+               release_sock(sk);
+               return 0;
+       } else if (err != 0)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       /* Autobind to an ephemeral local address if not yet bound. */
+       if (!vsock_addr_bound(&vsk->local_addr)) {
+               struct sockaddr_vm local_addr;
+
+               vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+               err = __vsock_bind(sk, &local_addr);
+               if (err != 0)
+                       goto out;
+
+       }
+
+       /* Let the transport veto destinations it cannot reach. */
+       if (!transport->dgram_allow(remote_addr->svm_cid,
+                                   remote_addr->svm_port)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
+       sock->state = SS_CONNECTED;
+
+out:
+       release_sock(sk);
+       return err;
+}
+
+/* Datagram recvmsg(): delegate directly to the transport's dequeue hook,
+ * which handles blocking, flags and source-address fill-in.
+ */
+static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
+                              struct msghdr *msg, size_t len, int flags)
+{
+       return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
+                                       flags);
+}
+
+/* proto_ops for SOCK_DGRAM vsock sockets.  accept/listen/socketpair and
+ * the socket-option hooks are stubbed with the sock_no_* helpers since
+ * they do not apply to datagrams.
+ */
+static const struct proto_ops vsock_dgram_ops = {
+       .family = PF_VSOCK,
+       .owner = THIS_MODULE,
+       .release = vsock_release,
+       .bind = vsock_bind,
+       .connect = vsock_dgram_connect,
+       .socketpair = sock_no_socketpair,
+       .accept = sock_no_accept,
+       .getname = vsock_getname,
+       .poll = vsock_poll,
+       .ioctl = sock_no_ioctl,
+       .listen = sock_no_listen,
+       .shutdown = vsock_shutdown,
+       .setsockopt = sock_no_setsockopt,
+       .getsockopt = sock_no_getsockopt,
+       .sendmsg = vsock_dgram_sendmsg,
+       .recvmsg = vsock_dgram_recvmsg,
+       .mmap = sock_no_mmap,
+       .sendpage = sock_no_sendpage,
+};
+
+/* Delayed-work callback armed by a non-blocking stream connect().  If the
+ * handshake is still in SS_CONNECTING when the timer fires, fail it with
+ * ETIMEDOUT and wake the owner.  The sock_put() pairs with the
+ * sock_hold() taken when the work was scheduled.
+ */
+static void vsock_connect_timeout(struct work_struct *work)
+{
+       struct sock *sk;
+       struct vsock_sock *vsk;
+
+       vsk = container_of(work, struct vsock_sock, dwork.work);
+       sk = sk_vsock(vsk);
+
+       lock_sock(sk);
+       if (sk->sk_state == SS_CONNECTING &&
+           (sk->sk_shutdown != SHUTDOWN_MASK)) {
+               sk->sk_state = SS_UNCONNECTED;
+               sk->sk_err = ETIMEDOUT;
+               sk->sk_error_report(sk);
+       }
+       release_sock(sk);
+
+       sock_put(sk);
+}
+
+/* Stream connect(): start the transport handshake and, unless O_NONBLOCK
+ * is set, sleep until the receive path moves the socket to SS_CONNECTED
+ * or an error/timeout occurs.  Non-blocking callers get -EINPROGRESS and
+ * a delayed-work timeout (vsock_connect_timeout) is armed on their
+ * behalf.  Called with the socket unlocked; takes and releases sk lock.
+ */
+static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
+                               int addr_len, int flags)
+{
+       int err;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       struct sockaddr_vm *remote_addr;
+       long timeout;
+       DEFINE_WAIT(wait);
+
+       err = 0;
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       lock_sock(sk);
+
+       /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
+       switch (sock->state) {
+       case SS_CONNECTED:
+               err = -EISCONN;
+               goto out;
+       case SS_DISCONNECTING:
+               err = -EINVAL;
+               goto out;
+       case SS_CONNECTING:
+               /* This continues on so we can move sock into the SS_CONNECTED
+                * state once the connection has completed (at which point err
+                * will be set to zero also).  Otherwise, we will either wait
+                * for the connection or return -EALREADY should this be a
+                * non-blocking call.
+                */
+               err = -EALREADY;
+               break;
+       default:
+               if ((sk->sk_state == SS_LISTEN) ||
+                   vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               /* The hypervisor and well-known contexts do not have socket
+                * endpoints.
+                */
+               if (!transport->stream_allow(remote_addr->svm_cid,
+                                            remote_addr->svm_port)) {
+                       err = -ENETUNREACH;
+                       goto out;
+               }
+
+               /* Set the remote address that we are connecting to. */
+               memcpy(&vsk->remote_addr, remote_addr,
+                      sizeof(vsk->remote_addr));
+
+               /* Autobind this socket to the local address if necessary. */
+               if (!vsock_addr_bound(&vsk->local_addr)) {
+                       struct sockaddr_vm local_addr;
+
+                       vsock_addr_init(&local_addr, VMADDR_CID_ANY,
+                                       VMADDR_PORT_ANY);
+                       err = __vsock_bind(sk, &local_addr);
+                       if (err != 0)
+                               goto out;
+
+               }
+
+               sk->sk_state = SS_CONNECTING;
+
+               /* Kick off the transport-level handshake. */
+               err = transport->connect(vsk);
+               if (err < 0)
+                       goto out;
+
+               /* Mark sock as connecting and set the error code to in
+                * progress in case this is a non-blocking connect.
+                */
+               sock->state = SS_CONNECTING;
+               err = -EINPROGRESS;
+       }
+
+       /* The receive path will handle all communication until we are able to
+        * enter the connected state.  Here we wait for the connection to be
+        * completed or a notification of an error.
+        */
+       timeout = vsk->connect_timeout;
+       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+       while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
+               if (flags & O_NONBLOCK) {
+                       /* If we're not going to block, we schedule a timeout
+                        * function to generate a timeout on the connection
+                        * attempt, in case the peer doesn't respond in a
+                        * timely manner. We hold on to the socket until the
+                        * timeout fires.
+                        */
+                       sock_hold(sk);
+                       INIT_DELAYED_WORK(&vsk->dwork,
+                                         vsock_connect_timeout);
+                       schedule_delayed_work(&vsk->dwork, timeout);
+
+                       /* Skip ahead to preserve error code set above. */
+                       goto out_wait;
+               }
+
+               /* Drop the lock while sleeping so the receive path can
+                * complete the handshake.
+                */
+               release_sock(sk);
+               timeout = schedule_timeout(timeout);
+               lock_sock(sk);
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeout);
+                       goto out_wait_error;
+               } else if (timeout == 0) {
+                       err = -ETIMEDOUT;
+                       goto out_wait_error;
+               }
+
+               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       }
+
+       if (sk->sk_err) {
+               err = -sk->sk_err;
+               goto out_wait_error;
+       } else
+               err = 0;
+
+out_wait:
+       finish_wait(sk_sleep(sk), &wait);
+out:
+       release_sock(sk);
+       return err;
+
+out_wait_error:
+       sk->sk_state = SS_UNCONNECTED;
+       sock->state = SS_UNCONNECTED;
+       goto out_wait;
+}
+
+/* accept(2) for stream sockets: sleep (bounded by the listener's send
+ * timeout, or not at all with O_NONBLOCK) until a child socket appears on
+ * the listener's accept queue, then graft it onto newsock.  A child found
+ * after the listener erred is marked rejected and released so the normal
+ * cleanup path tears it down.
+ */
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+       struct sock *listener;
+       int err;
+       struct sock *connected;
+       struct vsock_sock *vconnected;
+       long timeout;
+       DEFINE_WAIT(wait);
+
+       err = 0;
+       listener = sock->sk;
+
+       lock_sock(listener);
+
+       if (sock->type != SOCK_STREAM) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (listener->sk_state != SS_LISTEN) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* Wait for children sockets to appear; these are the new sockets
+        * created upon connection establishment.
+        */
+       timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
+       prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
+
+       while ((connected = vsock_dequeue_accept(listener)) == NULL &&
+              listener->sk_err == 0) {
+               release_sock(listener);
+               timeout = schedule_timeout(timeout);
+               lock_sock(listener);
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeout);
+                       goto out_wait;
+               } else if (timeout == 0) {
+                       err = -EAGAIN;
+                       goto out_wait;
+               }
+
+               prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
+       }
+
+       if (listener->sk_err)
+               err = -listener->sk_err;
+
+       if (connected) {
+               listener->sk_ack_backlog--;
+
+               lock_sock(connected);
+               vconnected = vsock_sk(connected);
+
+               /* If the listener socket has received an error, then we should
+                * reject this socket and return.  Note that we simply mark the
+                * socket rejected, drop our reference, and let the cleanup
+                * function handle the cleanup; the fact that we found it in
+                * the listener's accept queue guarantees that the cleanup
+                * function hasn't run yet.
+                */
+               if (err) {
+                       vconnected->rejected = true;
+                       release_sock(connected);
+                       sock_put(connected);
+                       goto out_wait;
+               }
+
+               newsock->state = SS_CONNECTED;
+               sock_graft(connected, newsock);
+               release_sock(connected);
+               sock_put(connected);
+       }
+
+out_wait:
+       finish_wait(sk_sleep(listener), &wait);
+out:
+       release_sock(listener);
+       return err;
+}
+
+/* listen(2): move a bound, unconnected stream socket into SS_LISTEN and
+ * record the accept backlog limit.  Fails with -EINVAL unless the socket
+ * has an explicit local address (no autobind here).
+ */
+static int vsock_listen(struct socket *sock, int backlog)
+{
+       int err;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+
+       sk = sock->sk;
+
+       lock_sock(sk);
+
+       if (sock->type != SOCK_STREAM) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (sock->state != SS_UNCONNECTED) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       vsk = vsock_sk(sk);
+
+       if (!vsock_addr_bound(&vsk->local_addr)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       sk->sk_max_ack_backlog = backlog;
+       sk->sk_state = SS_LISTEN;
+
+       err = 0;
+
+out:
+       release_sock(sk);
+       return err;
+}
+
+/* setsockopt(2) for stream sockets at the AF_VSOCK level.  Supports the
+ * SO_VM_SOCKETS_BUFFER_* sizes (forwarded to the transport) and
+ * SO_VM_SOCKETS_CONNECT_TIMEOUT (a struct timeval converted to jiffies;
+ * a zero timeout is replaced by the module default).
+ */
+static int vsock_stream_setsockopt(struct socket *sock,
+                                  int level,
+                                  int optname,
+                                  char __user *optval,
+                                  unsigned int optlen)
+{
+       int err;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       u64 val;
+
+       if (level != AF_VSOCK)
+               return -ENOPROTOOPT;
+
+/* Copy an option value in from userspace; on short or faulting input,
+ * set err and jump to the common exit (sk is locked there).
+ */
+#define COPY_IN(_v)                                       \
+       do {                                              \
+               if (optlen < sizeof(_v)) {                \
+                       err = -EINVAL;                    \
+                       goto exit;                        \
+               }                                         \
+               if (copy_from_user(&_v, optval, sizeof(_v)) != 0) {     \
+                       err = -EFAULT;                                  \
+                       goto exit;                                      \
+               }                                                       \
+       } while (0)
+
+       err = 0;
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case SO_VM_SOCKETS_BUFFER_SIZE:
+               COPY_IN(val);
+               transport->set_buffer_size(vsk, val);
+               break;
+
+       case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
+               COPY_IN(val);
+               transport->set_max_buffer_size(vsk, val);
+               break;
+
+       case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
+               COPY_IN(val);
+               transport->set_min_buffer_size(vsk, val);
+               break;
+
+       case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
+               struct timeval tv;
+               COPY_IN(tv);
+               /* Range-check so the jiffies conversion cannot overflow
+                * MAX_SCHEDULE_TIMEOUT.
+                */
+               if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
+                   tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
+                       vsk->connect_timeout = tv.tv_sec * HZ +
+                           DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
+                       if (vsk->connect_timeout == 0)
+                               vsk->connect_timeout =
+                                   VSOCK_DEFAULT_CONNECT_TIMEOUT;
+
+               } else {
+                       err = -ERANGE;
+               }
+               break;
+       }
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+#undef COPY_IN
+
+exit:
+       release_sock(sk);
+       return err;
+}
+
+/* getsockopt(2) for stream sockets at the AF_VSOCK level: the mirror of
+ * vsock_stream_setsockopt().  Buffer sizes are read back from the
+ * transport; the connect timeout is converted from jiffies to a
+ * struct timeval.
+ */
+static int vsock_stream_getsockopt(struct socket *sock,
+                                  int level, int optname,
+                                  char __user *optval,
+                                  int __user *optlen)
+{
+       int err;
+       int len;
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       u64 val;
+
+       if (level != AF_VSOCK)
+               return -ENOPROTOOPT;
+
+       err = get_user(len, optlen);
+       if (err != 0)
+               return err;
+
+/* Copy an option value out to userspace, shrinking len to the value's
+ * size.  Safe to return directly: the socket lock is not held here.
+ */
+#define COPY_OUT(_v)                            \
+       do {                                    \
+               if (len < sizeof(_v))           \
+                       return -EINVAL;         \
+                                               \
+               len = sizeof(_v);               \
+               if (copy_to_user(optval, &_v, len) != 0)        \
+                       return -EFAULT;                         \
+                                                               \
+       } while (0)
+
+       err = 0;
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       switch (optname) {
+       case SO_VM_SOCKETS_BUFFER_SIZE:
+               val = transport->get_buffer_size(vsk);
+               COPY_OUT(val);
+               break;
+
+       case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
+               val = transport->get_max_buffer_size(vsk);
+               COPY_OUT(val);
+               break;
+
+       case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
+               val = transport->get_min_buffer_size(vsk);
+               COPY_OUT(val);
+               break;
+
+       case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
+               struct timeval tv;
+               tv.tv_sec = vsk->connect_timeout / HZ;
+               tv.tv_usec =
+                   (vsk->connect_timeout -
+                    tv.tv_sec * HZ) * (1000000 / HZ);
+               COPY_OUT(tv);
+               break;
+       }
+       default:
+               return -ENOPROTOOPT;
+       }
+
+       /* Report the actual length written back to the caller. */
+       err = put_user(len, optlen);
+       if (err != 0)
+               return -EFAULT;
+
+#undef COPY_OUT
+
+       return 0;
+}
+
+/* Stream sendmsg(): enqueue the caller's data through the transport,
+ * sleeping (bounded by the socket send timeout) whenever the produce
+ * queue is full.  Returns the number of bytes written if any were,
+ * otherwise a negative errno.  MSG_OOB and explicit destinations are
+ * rejected on stream sockets.
+ */
+static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+                               struct msghdr *msg, size_t len)
+{
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       ssize_t total_written;
+       long timeout;
+       int err;
+       struct vsock_transport_send_notify_data send_data;
+
+       DEFINE_WAIT(wait);
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+       total_written = 0;
+       err = 0;
+
+       if (msg->msg_flags & MSG_OOB)
+               return -EOPNOTSUPP;
+
+       lock_sock(sk);
+
+       /* Callers should not provide a destination with stream sockets. */
+       if (msg->msg_namelen) {
+               err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* Send data only if both sides are not shutdown in the direction. */
+       if (sk->sk_shutdown & SEND_SHUTDOWN ||
+           vsk->peer_shutdown & RCV_SHUTDOWN) {
+               err = -EPIPE;
+               goto out;
+       }
+
+       if (sk->sk_state != SS_CONNECTED ||
+           !vsock_addr_bound(&vsk->local_addr)) {
+               err = -ENOTCONN;
+               goto out;
+       }
+
+       if (!vsock_addr_bound(&vsk->remote_addr)) {
+               err = -EDESTADDRREQ;
+               goto out;
+       }
+
+       /* Wait for room in the produce queue to enqueue our user's data. */
+       timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+       err = transport->notify_send_init(vsk, &send_data);
+       if (err < 0)
+               goto out;
+
+       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+       while (total_written < len) {
+               ssize_t written;
+
+               /* Block until there is space, an error, or a shutdown in
+                * either direction.
+                */
+               while (vsock_stream_has_space(vsk) == 0 &&
+                      sk->sk_err == 0 &&
+                      !(sk->sk_shutdown & SEND_SHUTDOWN) &&
+                      !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
+
+                       /* Don't wait for non-blocking sockets. */
+                       if (timeout == 0) {
+                               err = -EAGAIN;
+                               goto out_wait;
+                       }
+
+                       err = transport->notify_send_pre_block(vsk, &send_data);
+                       if (err < 0)
+                               goto out_wait;
+
+                       release_sock(sk);
+                       timeout = schedule_timeout(timeout);
+                       lock_sock(sk);
+                       if (signal_pending(current)) {
+                               err = sock_intr_errno(timeout);
+                               goto out_wait;
+                       } else if (timeout == 0) {
+                               err = -EAGAIN;
+                               goto out_wait;
+                       }
+
+                       prepare_to_wait(sk_sleep(sk), &wait,
+                                       TASK_INTERRUPTIBLE);
+               }
+
+               /* These checks occur both as part of and after the loop
+                * conditional since we need to check before and after
+                * sleeping.
+                */
+               if (sk->sk_err) {
+                       err = -sk->sk_err;
+                       goto out_wait;
+               } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
+                          (vsk->peer_shutdown & RCV_SHUTDOWN)) {
+                       err = -EPIPE;
+                       goto out_wait;
+               }
+
+               err = transport->notify_send_pre_enqueue(vsk, &send_data);
+               if (err < 0)
+                       goto out_wait;
+
+               /* Note that enqueue will only write as many bytes as are free
+                * in the produce queue, so we don't need to ensure len is
+                * smaller than the queue size.  It is the caller's
+                * responsibility to check how many bytes we were able to send.
+                */
+
+               written = transport->stream_enqueue(
+                               vsk, msg->msg_iov,
+                               len - total_written);
+               if (written < 0) {
+                       err = -ENOMEM;
+                       goto out_wait;
+               }
+
+               total_written += written;
+
+               err = transport->notify_send_post_enqueue(
+                               vsk, written, &send_data);
+               if (err < 0)
+                       goto out_wait;
+
+       }
+
+out_wait:
+       /* A partial write trumps any error collected along the way. */
+       if (total_written > 0)
+               err = total_written;
+       finish_wait(sk_sleep(sk), &wait);
+out:
+       release_sock(sk);
+       return err;
+}
+
+
+/* Stream recvmsg(): dequeue up to len bytes from the transport, sleeping
+ * (bounded by the receive timeout) until at least the SO_RCVLOWAT target
+ * is available, the peer shuts down, or an error occurs.  Returns bytes
+ * copied, 0 on orderly shutdown, or a negative errno.
+ */
+static int
+vsock_stream_recvmsg(struct kiocb *kiocb,
+                    struct socket *sock,
+                    struct msghdr *msg, size_t len, int flags)
+{
+       struct sock *sk;
+       struct vsock_sock *vsk;
+       int err;
+       size_t target;
+       ssize_t copied;
+       long timeout;
+       struct vsock_transport_recv_notify_data recv_data;
+
+       DEFINE_WAIT(wait);
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+       err = 0;
+
+       lock_sock(sk);
+
+       if (sk->sk_state != SS_CONNECTED) {
+               /* Recvmsg is supposed to return 0 if a peer performs an
+                * orderly shutdown. Differentiate between that case and when a
+                * peer has not connected or a local shutdown occured with the
+                * SOCK_DONE flag.
+                */
+               if (sock_flag(sk, SOCK_DONE))
+                       err = 0;
+               else
+                       err = -ENOTCONN;
+
+               goto out;
+       }
+
+       if (flags & MSG_OOB) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* We don't check peer_shutdown flag here since peer may actually shut
+        * down, but there can be data in the queue that a local socket can
+        * receive.
+        */
+       if (sk->sk_shutdown & RCV_SHUTDOWN) {
+               err = 0;
+               goto out;
+       }
+
+       /* It is valid on Linux to pass in a zero-length receive buffer.  This
+        * is not an error.  We may as well bail out now.
+        */
+       if (!len) {
+               err = 0;
+               goto out;
+       }
+
+       /* We must not copy less than target bytes into the user's buffer
+        * before returning successfully, so we wait for the consume queue to
+        * have that much data to consume before dequeueing.  Note that this
+        * makes it impossible to handle cases where target is greater than the
+        * queue size.
+        */
+       target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+       if (target >= transport->stream_rcvhiwat(vsk)) {
+               err = -ENOMEM;
+               goto out;
+       }
+       timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+       copied = 0;
+
+       err = transport->notify_recv_init(vsk, target, &recv_data);
+       if (err < 0)
+               goto out;
+
+       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+       while (1) {
+               s64 ready = vsock_stream_has_data(vsk);
+
+               if (ready < 0) {
+                       /* Invalid queue pair content. XXX This should be
+                        * changed to a connection reset in a later change.
+                        */
+
+                       err = -ENOMEM;
+                       goto out_wait;
+               } else if (ready > 0) {
+                       ssize_t read;
+
+                       err = transport->notify_recv_pre_dequeue(
+                                       vsk, target, &recv_data);
+                       if (err < 0)
+                               break;
+
+                       read = transport->stream_dequeue(
+                                       vsk, msg->msg_iov,
+                                       len - copied, flags);
+                       if (read < 0) {
+                               err = -ENOMEM;
+                               break;
+                       }
+
+                       copied += read;
+
+                       err = transport->notify_recv_post_dequeue(
+                                       vsk, target, read,
+                                       !(flags & MSG_PEEK), &recv_data);
+                       if (err < 0)
+                               goto out_wait;
+
+                       /* Done once the low-water target is met; MSG_PEEK
+                        * never loops.
+                        */
+                       if (read >= target || flags & MSG_PEEK)
+                               break;
+
+                       target -= read;
+               } else {
+                       if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
+                           || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
+                               break;
+                       }
+                       /* Don't wait for non-blocking sockets. */
+                       if (timeout == 0) {
+                               err = -EAGAIN;
+                               break;
+                       }
+
+                       err = transport->notify_recv_pre_block(
+                                       vsk, target, &recv_data);
+                       if (err < 0)
+                               break;
+
+                       release_sock(sk);
+                       timeout = schedule_timeout(timeout);
+                       lock_sock(sk);
+
+                       if (signal_pending(current)) {
+                               err = sock_intr_errno(timeout);
+                               break;
+                       } else if (timeout == 0) {
+                               err = -EAGAIN;
+                               break;
+                       }
+
+                       prepare_to_wait(sk_sleep(sk), &wait,
+                                       TASK_INTERRUPTIBLE);
+               }
+       }
+
+       if (sk->sk_err)
+               err = -sk->sk_err;
+       else if (sk->sk_shutdown & RCV_SHUTDOWN)
+               err = 0;
+
+       if (copied > 0) {
+               /* We only do these additional bookkeeping/notification steps
+                * if we actually copied something out of the queue pair
+                * instead of just peeking ahead.
+                */
+
+               if (!(flags & MSG_PEEK)) {
+                       /* If the other side has shutdown for sending and there
+                        * is nothing more to read, then modify the socket
+                        * state.
+                        */
+                       if (vsk->peer_shutdown & SEND_SHUTDOWN) {
+                               if (vsock_stream_has_data(vsk) <= 0) {
+                                       sk->sk_state = SS_UNCONNECTED;
+                                       sock_set_flag(sk, SOCK_DONE);
+                                       sk->sk_state_change(sk);
+                               }
+                       }
+               }
+               err = copied;
+       }
+
+out_wait:
+       finish_wait(sk_sleep(sk), &wait);
+out:
+       release_sock(sk);
+       return err;
+}
+
+/* proto_ops for SOCK_STREAM vsock sockets: the full connection-oriented
+ * set (connect/accept/listen plus the AF_VSOCK socket options).
+ */
+static const struct proto_ops vsock_stream_ops = {
+       .family = PF_VSOCK,
+       .owner = THIS_MODULE,
+       .release = vsock_release,
+       .bind = vsock_bind,
+       .connect = vsock_stream_connect,
+       .socketpair = sock_no_socketpair,
+       .accept = vsock_accept,
+       .getname = vsock_getname,
+       .poll = vsock_poll,
+       .ioctl = sock_no_ioctl,
+       .listen = vsock_listen,
+       .shutdown = vsock_shutdown,
+       .setsockopt = vsock_stream_setsockopt,
+       .getsockopt = vsock_stream_getsockopt,
+       .sendmsg = vsock_stream_sendmsg,
+       .recvmsg = vsock_stream_recvmsg,
+       .mmap = sock_no_mmap,
+       .sendpage = sock_no_sendpage,
+};
+
+/* socket(2) handler for AF_VSOCK: pick the proto_ops table by socket
+ * type (SOCK_DGRAM or SOCK_STREAM only) and allocate the underlying
+ * struct sock via __vsock_create().
+ */
+static int vsock_create(struct net *net, struct socket *sock,
+                       int protocol, int kern)
+{
+       if (!sock)
+               return -EINVAL;
+
+       if (protocol && protocol != PF_VSOCK)
+               return -EPROTONOSUPPORT;
+
+       switch (sock->type) {
+       case SOCK_DGRAM:
+               sock->ops = &vsock_dgram_ops;
+               break;
+       case SOCK_STREAM:
+               sock->ops = &vsock_stream_ops;
+               break;
+       default:
+               return -ESOCKTNOSUPPORT;
+       }
+
+       sock->state = SS_UNCONNECTED;
+
+       return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
+}
+
+/* Registered with sock_register() so socket(AF_VSOCK, ...) reaches
+ * vsock_create().
+ */
+static const struct net_proto_family vsock_family_ops = {
+       .family = AF_VSOCK,
+       .create = vsock_create,
+       .owner = THIS_MODULE,
+};
+
+/* Shared worker for the /dev/vsock ioctls (native and compat paths).
+ * Only IOCTL_VM_SOCKETS_GET_LOCAL_CID is supported: it copies the
+ * transport's local context ID out to userspace.
+ */
+static long vsock_dev_do_ioctl(struct file *filp,
+                              unsigned int cmd, void __user *ptr)
+{
+       u32 __user *p = ptr;
+       int retval = 0;
+
+       switch (cmd) {
+       case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
+               if (put_user(transport->get_local_cid(), p) != 0)
+                       retval = -EFAULT;
+               break;
+
+       default:
+               pr_err("Unknown ioctl %d\n", cmd);
+               retval = -EINVAL;
+       }
+
+       return retval;
+}
+
+/* Native unlocked_ioctl entry point: cast arg to a user pointer and
+ * delegate to the shared worker.
+ */
+static long vsock_dev_ioctl(struct file *filp,
+                           unsigned int cmd, unsigned long arg)
+{
+       return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit-on-64-bit ioctl entry point: compat_ptr() converts the 32-bit
+ * user pointer before delegating to the shared worker.
+ */
+static long vsock_dev_compat_ioctl(struct file *filp,
+                                  unsigned int cmd, unsigned long arg)
+{
+       return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
+}
+#endif
+
+/* file_operations for the /dev/vsock misc device; ioctl-only, opened
+ * non-seekable.
+ */
+static const struct file_operations vsock_device_ops = {
+       .owner          = THIS_MODULE,
+       .unlocked_ioctl = vsock_dev_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = vsock_dev_compat_ioctl,
+#endif
+       .open           = nonseekable_open,
+};
+
+/* The /dev/vsock misc device, registered in __vsock_core_init() with a
+ * dynamically assigned minor.
+ */
+static struct miscdevice vsock_device = {
+       .name           = "vsock",
+       .minor          = MISC_DYNAMIC_MINOR,
+       .fops           = &vsock_device_ops,
+};
+
+/* One-time bring-up once a transport has registered: initialize the
+ * socket lookup tables, create /dev/vsock, and register the protocol and
+ * address family.  Unwinds in reverse order on failure via the labeled
+ * error path.
+ */
+static int __vsock_core_init(void)
+{
+       int err;
+
+       vsock_init_tables();
+
+       err = misc_register(&vsock_device);
+       if (err) {
+               pr_err("Failed to register misc device\n");
+               return -ENOENT;
+       }
+
+       err = proto_register(&vsock_proto, 1);  /* we want our slab */
+       if (err) {
+               pr_err("Cannot register vsock protocol\n");
+               goto err_misc_deregister;
+       }
+
+       err = sock_register(&vsock_family_ops);
+       if (err) {
+               pr_err("could not register af_vsock (%d) address family: %d\n",
+                      AF_VSOCK, err);
+               goto err_unregister_proto;
+       }
+
+       return 0;
+
+err_unregister_proto:
+       proto_unregister(&vsock_proto);
+err_misc_deregister:
+       misc_deregister(&vsock_device);
+       return err;
+}
+
+/* Public entry point for transport modules (e.g. VMCI): install their
+ * vsock_transport and bring up the address family.  Only one transport
+ * may be registered at a time (-EBUSY otherwise); registration is
+ * serialized by vsock_register_mutex and rolled back if init fails.
+ */
+int vsock_core_init(const struct vsock_transport *t)
+{
+       int retval = mutex_lock_interruptible(&vsock_register_mutex);
+       if (retval)
+               return retval;
+
+       if (transport) {
+               retval = -EBUSY;
+               goto out;
+       }
+
+       transport = t;
+       retval = __vsock_core_init();
+       if (retval)
+               transport = NULL;
+
+out:
+       mutex_unlock(&vsock_register_mutex);
+       return retval;
+}
+EXPORT_SYMBOL_GPL(vsock_core_init);
+
+/* Public teardown for transport modules: unregister the device, address
+ * family and protocol, then detach the transport pointer.
+ */
+void vsock_core_exit(void)
+{
+       mutex_lock(&vsock_register_mutex);
+
+       misc_deregister(&vsock_device);
+       sock_unregister(AF_VSOCK);
+       proto_unregister(&vsock_proto);
+
+       /* We do not want the assignment below re-ordered. */
+       mb();
+       transport = NULL;
+
+       mutex_unlock(&vsock_register_mutex);
+}
+EXPORT_SYMBOL_GPL(vsock_core_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Socket Family");
+MODULE_VERSION("1.0.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/net/vmw_vsock/af_vsock.h b/net/vmw_vsock/af_vsock.h
new file mode 100644 (file)
index 0000000..7d64d36
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __AF_VSOCK_H__
+#define __AF_VSOCK_H__
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/vm_sockets.h>
+
+#include "vsock_addr.h"
+
+#define LAST_RESERVED_PORT 1023
+
+#define vsock_sk(__sk)    ((struct vsock_sock *)__sk)
+#define sk_vsock(__vsk)   (&(__vsk)->sk)
+
+struct vsock_sock {
+       /* sk must be the first member. */
+       struct sock sk;
+       struct sockaddr_vm local_addr;
+       struct sockaddr_vm remote_addr;
+       /* Links for the global tables of bound and connected sockets. */
+       struct list_head bound_table;
+       struct list_head connected_table;
+       /* Accessed without the socket lock held. This means it can never be
+        * modified outside of socket create or destruct.
+        */
+       bool trusted;
+       bool cached_peer_allow_dgram;   /* Dgram communication allowed to
+                                        * cached peer?
+                                        */
+       u32 cached_peer;  /* Context ID of last dgram destination check. */
+       const struct cred *owner;
+       /* Rest are SOCK_STREAM only. */
+       long connect_timeout;
+       /* Listening socket that this came from. */
+       struct sock *listener;
+       /* Used for pending list and accept queue during connection handshake.
+        * The listening socket is the head for both lists.  Sockets created
+        * for connection requests are placed in the pending list until they
+        * are connected, at which point they are put in the accept queue list
+        * so they can be accepted in accept().  If accept() cannot accept the
+        * connection, it is marked as rejected so the cleanup function knows
+        * to clean up the socket.
+        */
+       struct list_head pending_links;
+       struct list_head accept_queue;
+       bool rejected;
+       struct delayed_work dwork;
+       u32 peer_shutdown;
+       bool sent_request;
+       bool ignore_connecting_rst;
+
+       /* Private to transport. */
+       void *trans;
+};
+
+s64 vsock_stream_has_data(struct vsock_sock *vsk);
+s64 vsock_stream_has_space(struct vsock_sock *vsk);
+void vsock_pending_work(struct work_struct *work);
+struct sock *__vsock_create(struct net *net,
+                           struct socket *sock,
+                           struct sock *parent,
+                           gfp_t priority, unsigned short type);
+
+/**** TRANSPORT ****/
+
+struct vsock_transport_recv_notify_data {
+       u64 data1; /* Transport-defined. */
+       u64 data2; /* Transport-defined. */
+       bool notify_on_block;
+};
+
+struct vsock_transport_send_notify_data {
+       u64 data1; /* Transport-defined. */
+       u64 data2; /* Transport-defined. */
+};
+
+struct vsock_transport {
+       /* Initialize/tear-down socket. */
+       int (*init)(struct vsock_sock *, struct vsock_sock *);
+       void (*destruct)(struct vsock_sock *);
+       void (*release)(struct vsock_sock *);
+
+       /* Connections. */
+       int (*connect)(struct vsock_sock *);
+
+       /* DGRAM. */
+       int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
+       int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
+                            struct msghdr *msg, size_t len, int flags);
+       int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
+                            struct iovec *, size_t len);
+       bool (*dgram_allow)(u32 cid, u32 port);
+
+       /* STREAM. */
+       /* TODO: stream_bind() */
+       ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
+                                 size_t len, int flags);
+       ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
+                                 size_t len);
+       s64 (*stream_has_data)(struct vsock_sock *);
+       s64 (*stream_has_space)(struct vsock_sock *);
+       u64 (*stream_rcvhiwat)(struct vsock_sock *);
+       bool (*stream_is_active)(struct vsock_sock *);
+       bool (*stream_allow)(u32 cid, u32 port);
+
+       /* Notification. */
+       int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
+       int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
+       int (*notify_recv_init)(struct vsock_sock *, size_t,
+               struct vsock_transport_recv_notify_data *);
+       int (*notify_recv_pre_block)(struct vsock_sock *, size_t,
+               struct vsock_transport_recv_notify_data *);
+       int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t,
+               struct vsock_transport_recv_notify_data *);
+       int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t,
+               ssize_t, bool, struct vsock_transport_recv_notify_data *);
+       int (*notify_send_init)(struct vsock_sock *,
+               struct vsock_transport_send_notify_data *);
+       int (*notify_send_pre_block)(struct vsock_sock *,
+               struct vsock_transport_send_notify_data *);
+       int (*notify_send_pre_enqueue)(struct vsock_sock *,
+               struct vsock_transport_send_notify_data *);
+       int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
+               struct vsock_transport_send_notify_data *);
+
+       /* Shutdown. */
+       int (*shutdown)(struct vsock_sock *, int);
+
+       /* Buffer sizes. */
+       void (*set_buffer_size)(struct vsock_sock *, u64);
+       void (*set_min_buffer_size)(struct vsock_sock *, u64);
+       void (*set_max_buffer_size)(struct vsock_sock *, u64);
+       u64 (*get_buffer_size)(struct vsock_sock *);
+       u64 (*get_min_buffer_size)(struct vsock_sock *);
+       u64 (*get_max_buffer_size)(struct vsock_sock *);
+
+       /* Addressing. */
+       u32 (*get_local_cid)(void);
+};
+
+/**** CORE ****/
+
+int vsock_core_init(const struct vsock_transport *t);
+void vsock_core_exit(void);
+
+/**** UTILS ****/
+
+void vsock_release_pending(struct sock *pending);
+void vsock_add_pending(struct sock *listener, struct sock *pending);
+void vsock_remove_pending(struct sock *listener, struct sock *pending);
+void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
+void vsock_insert_connected(struct vsock_sock *vsk);
+void vsock_remove_bound(struct vsock_sock *vsk);
+void vsock_remove_connected(struct vsock_sock *vsk);
+struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
+struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
+                                        struct sockaddr_vm *dst);
+void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+
+#endif /* __AF_VSOCK_H__ */
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
new file mode 100644 (file)
index 0000000..a70ace8
--- /dev/null
@@ -0,0 +1,2155 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/cred.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/smp.h>
+#include <linux/socket.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+
+#include "af_vsock.h"
+#include "vmci_transport_notify.h"
+
+static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
+static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
+static void vmci_transport_peer_attach_cb(u32 sub_id,
+                                         const struct vmci_event_data *ed,
+                                         void *client_data);
+static void vmci_transport_peer_detach_cb(u32 sub_id,
+                                         const struct vmci_event_data *ed,
+                                         void *client_data);
+static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static int vmci_transport_recv_listen(struct sock *sk,
+                                     struct vmci_transport_packet *pkt);
+static int vmci_transport_recv_connecting_server(
+                                       struct sock *sk,
+                                       struct sock *pending,
+                                       struct vmci_transport_packet *pkt);
+static int vmci_transport_recv_connecting_client(
+                                       struct sock *sk,
+                                       struct vmci_transport_packet *pkt);
+static int vmci_transport_recv_connecting_client_negotiate(
+                                       struct sock *sk,
+                                       struct vmci_transport_packet *pkt);
+static int vmci_transport_recv_connecting_client_invalid(
+                                       struct sock *sk,
+                                       struct vmci_transport_packet *pkt);
+static int vmci_transport_recv_connected(struct sock *sk,
+                                        struct vmci_transport_packet *pkt);
+static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
+static u16 vmci_transport_new_proto_supported_versions(void);
+static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
+                                                 bool old_pkt_proto);
+
+struct vmci_transport_recv_pkt_info {
+       struct work_struct work;
+       struct sock *sk;
+       struct vmci_transport_packet pkt;
+};
+
+static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
+                                                          VMCI_INVALID_ID };
+static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
+
+static int PROTOCOL_OVERRIDE = -1;
+
+#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN   128
+#define VMCI_TRANSPORT_DEFAULT_QP_SIZE       262144
+#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX   262144
+
+/* The default peer timeout indicates how long we will wait for a peer response
+ * to a control message.
+ */
+#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
+
+#define SS_LISTEN 255
+
+/* Helper function to convert from a VMCI error code to a VSock error code. */
+
+static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
+{
+       int err;
+
+       switch (vmci_error) {
+       case VMCI_ERROR_NO_MEM:
+               err = ENOMEM;
+               break;
+       case VMCI_ERROR_DUPLICATE_ENTRY:
+       case VMCI_ERROR_ALREADY_EXISTS:
+               err = EADDRINUSE;
+               break;
+       case VMCI_ERROR_NO_ACCESS:
+               err = EPERM;
+               break;
+       case VMCI_ERROR_NO_RESOURCES:
+               err = ENOBUFS;
+               break;
+       case VMCI_ERROR_INVALID_RESOURCE:
+               err = EHOSTUNREACH;
+               break;
+       case VMCI_ERROR_INVALID_ARGS:
+       default:
+               err = EINVAL;
+       }
+
+       return err > 0 ? -err : err;
+}
+
+static inline void
+vmci_transport_packet_init(struct vmci_transport_packet *pkt,
+                          struct sockaddr_vm *src,
+                          struct sockaddr_vm *dst,
+                          u8 type,
+                          u64 size,
+                          u64 mode,
+                          struct vmci_transport_waiting_info *wait,
+                          u16 proto,
+                          struct vmci_handle handle)
+{
+       /* We register the stream control handler as an any-CID handle, so we
+        * must always send from a source address of VMADDR_CID_ANY.
+        */
+       pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
+                                      VMCI_TRANSPORT_PACKET_RID);
+       pkt->dg.dst = vmci_make_handle(dst->svm_cid,
+                                      VMCI_TRANSPORT_PACKET_RID);
+       pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
+       pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
+       pkt->type = type;
+       pkt->src_port = src->svm_port;
+       pkt->dst_port = dst->svm_port;
+       memset(&pkt->proto, 0, sizeof(pkt->proto));
+       memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
+
+       switch (pkt->type) {
+       case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
+               pkt->u.size = 0;
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
+       case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
+               pkt->u.size = size;
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
+       case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
+               pkt->u.handle = handle;
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
+       case VMCI_TRANSPORT_PACKET_TYPE_READ:
+       case VMCI_TRANSPORT_PACKET_TYPE_RST:
+               pkt->u.size = 0;
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
+               pkt->u.mode = mode;
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
+       case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
+               memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
+       case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
+               pkt->u.size = size;
+               pkt->proto = proto;
+               break;
+       }
+}
+
+static inline void
+vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
+                                   struct sockaddr_vm *local,
+                                   struct sockaddr_vm *remote)
+{
+       vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
+       vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
+}
+
+static int
+__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
+                                 struct sockaddr_vm *src,
+                                 struct sockaddr_vm *dst,
+                                 enum vmci_transport_packet_type type,
+                                 u64 size,
+                                 u64 mode,
+                                 struct vmci_transport_waiting_info *wait,
+                                 u16 proto,
+                                 struct vmci_handle handle,
+                                 bool convert_error)
+{
+       int err;
+
+       vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
+                                  proto, handle);
+       err = vmci_datagram_send(&pkt->dg);
+       if (convert_error && (err < 0))
+               return vmci_transport_error_to_vsock_error(err);
+
+       return err;
+}
+
+static int
+vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
+                                     enum vmci_transport_packet_type type,
+                                     u64 size,
+                                     u64 mode,
+                                     struct vmci_transport_waiting_info *wait,
+                                     struct vmci_handle handle)
+{
+       struct vmci_transport_packet reply;
+       struct sockaddr_vm src, dst;
+
+       if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
+               return 0;
+       } else {
+               vmci_transport_packet_get_addresses(pkt, &src, &dst);
+               return __vmci_transport_send_control_pkt(&reply, &src, &dst,
+                                                        type,
+                                                        size, mode, wait,
+                                                        VSOCK_PROTO_INVALID,
+                                                        handle, true);
+       }
+}
+
+static int
+vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
+                                  struct sockaddr_vm *dst,
+                                  enum vmci_transport_packet_type type,
+                                  u64 size,
+                                  u64 mode,
+                                  struct vmci_transport_waiting_info *wait,
+                                  struct vmci_handle handle)
+{
+       /* Note that it is safe to use a single packet across all CPUs since
+        * two tasklets of the same type are guaranteed to not ever run
+        * simultaneously. If that ever changes, or VMCI stops using tasklets,
+        * we can use per-cpu packets.
+        */
+       static struct vmci_transport_packet pkt;
+
+       return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
+                                                size, mode, wait,
+                                                VSOCK_PROTO_INVALID, handle,
+                                                false);
+}
+
+static int
+vmci_transport_send_control_pkt(struct sock *sk,
+                               enum vmci_transport_packet_type type,
+                               u64 size,
+                               u64 mode,
+                               struct vmci_transport_waiting_info *wait,
+                               u16 proto,
+                               struct vmci_handle handle)
+{
+       struct vmci_transport_packet *pkt;
+       struct vsock_sock *vsk;
+       int err;
+
+       vsk = vsock_sk(sk);
+
+       if (!vsock_addr_bound(&vsk->local_addr))
+               return -EINVAL;
+
+       if (!vsock_addr_bound(&vsk->remote_addr))
+               return -EINVAL;
+
+       pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+       if (!pkt)
+               return -ENOMEM;
+
+       err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
+                                               &vsk->remote_addr, type, size,
+                                               mode, wait, proto, handle,
+                                               true);
+       kfree(pkt);
+
+       return err;
+}
+
+static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
+                                       struct sockaddr_vm *src,
+                                       struct vmci_transport_packet *pkt)
+{
+       if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
+               return 0;
+       return vmci_transport_send_control_pkt_bh(
+                                       dst, src,
+                                       VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
+                                       0, NULL, VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_reset(struct sock *sk,
+                                    struct vmci_transport_packet *pkt)
+{
+       if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
+               return 0;
+       return vmci_transport_send_control_pkt(sk,
+                                       VMCI_TRANSPORT_PACKET_TYPE_RST,
+                                       0, 0, NULL, VSOCK_PROTO_INVALID,
+                                       VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk,
+                                       VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
+                                       size, 0, NULL,
+                                       VSOCK_PROTO_INVALID,
+                                       VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
+                                         u16 version)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk,
+                                       VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
+                                       size, 0, NULL, version,
+                                       VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_qp_offer(struct sock *sk,
+                                       struct vmci_handle handle)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
+                                       0, NULL,
+                                       VSOCK_PROTO_INVALID, handle);
+}
+
+static int vmci_transport_send_attach(struct sock *sk,
+                                     struct vmci_handle handle)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
+                                       0, 0, NULL, VSOCK_PROTO_INVALID,
+                                       handle);
+}
+
+static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
+{
+       return vmci_transport_reply_control_pkt_fast(
+                                               pkt,
+                                               VMCI_TRANSPORT_PACKET_TYPE_RST,
+                                               0, 0, NULL,
+                                               VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
+                                         struct sockaddr_vm *src)
+{
+       return vmci_transport_send_control_pkt_bh(
+                                       dst, src,
+                                       VMCI_TRANSPORT_PACKET_TYPE_INVALID,
+                                       0, 0, NULL, VMCI_INVALID_HANDLE);
+}
+
+int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
+                                struct sockaddr_vm *src)
+{
+       return vmci_transport_send_control_pkt_bh(
+                                       dst, src,
+                                       VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
+                                       0, NULL, VMCI_INVALID_HANDLE);
+}
+
+int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
+                               struct sockaddr_vm *src)
+{
+       return vmci_transport_send_control_pkt_bh(
+                                       dst, src,
+                                       VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
+                                       0, NULL, VMCI_INVALID_HANDLE);
+}
+
+int vmci_transport_send_wrote(struct sock *sk)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
+                                       0, NULL, VSOCK_PROTO_INVALID,
+                                       VMCI_INVALID_HANDLE);
+}
+
+int vmci_transport_send_read(struct sock *sk)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
+                                       0, NULL, VSOCK_PROTO_INVALID,
+                                       VMCI_INVALID_HANDLE);
+}
+
+int vmci_transport_send_waiting_write(struct sock *sk,
+                                     struct vmci_transport_waiting_info *wait)
+{
+       return vmci_transport_send_control_pkt(
+                               sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
+                               0, 0, wait, VSOCK_PROTO_INVALID,
+                               VMCI_INVALID_HANDLE);
+}
+
+int vmci_transport_send_waiting_read(struct sock *sk,
+                                    struct vmci_transport_waiting_info *wait)
+{
+       return vmci_transport_send_control_pkt(
+                               sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
+                               0, 0, wait, VSOCK_PROTO_INVALID,
+                               VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
+{
+       return vmci_transport_send_control_pkt(
+                                       &vsk->sk,
+                                       VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
+                                       0, mode, NULL,
+                                       VSOCK_PROTO_INVALID,
+                                       VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
+{
+       return vmci_transport_send_control_pkt(sk,
+                                       VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
+                                       size, 0, NULL,
+                                       VSOCK_PROTO_INVALID,
+                                       VMCI_INVALID_HANDLE);
+}
+
+static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
+                                            u16 version)
+{
+       return vmci_transport_send_control_pkt(
+                                       sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
+                                       size, 0, NULL, version,
+                                       VMCI_INVALID_HANDLE);
+}
+
+static struct sock *vmci_transport_get_pending(
+                                       struct sock *listener,
+                                       struct vmci_transport_packet *pkt)
+{
+       struct vsock_sock *vlistener;
+       struct vsock_sock *vpending;
+       struct sock *pending;
+
+       vlistener = vsock_sk(listener);
+
+       list_for_each_entry(vpending, &vlistener->pending_links,
+                           pending_links) {
+               struct sockaddr_vm src;
+               struct sockaddr_vm dst;
+
+               vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
+               vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
+
+               if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
+                   vsock_addr_equals_addr(&dst, &vpending->local_addr)) {
+                       pending = sk_vsock(vpending);
+                       sock_hold(pending);
+                       goto found;
+               }
+       }
+
+       pending = NULL;
+found:
+       return pending;
+
+}
+
+static void vmci_transport_release_pending(struct sock *pending)
+{
+       sock_put(pending);
+}
+
+/* We allow two kinds of sockets to communicate with a restricted VM: 1)
+ * trusted sockets 2) sockets from applications running as the same user as the
+ * VM (this is only true for the host side and only when using hosted products)
+ */
+
+static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
+{
+       return vsock->trusted ||
+              vmci_is_context_owner(peer_cid, vsock->owner->uid);
+}
+
+/* We allow sending datagrams to and receiving datagrams from a restricted VM
+ * only if it is trusted as described in vmci_transport_is_trusted.
+ */
+
+static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
+{
+       if (vsock->cached_peer != peer_cid) {
+               vsock->cached_peer = peer_cid;
+               if (!vmci_transport_is_trusted(vsock, peer_cid) &&
+                   (vmci_context_get_priv_flags(peer_cid) &
+                    VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
+                       vsock->cached_peer_allow_dgram = false;
+               } else {
+                       vsock->cached_peer_allow_dgram = true;
+               }
+       }
+
+       return vsock->cached_peer_allow_dgram;
+}
+
+static int
+vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
+                               struct vmci_handle *handle,
+                               u64 produce_size,
+                               u64 consume_size,
+                               u32 peer, u32 flags, bool trusted)
+{
+       int err = 0;
+
+       if (trusted) {
+               /* Try to allocate our queue pair as trusted. This will only
+                * work if vsock is running in the host.
+                */
+
+               err = vmci_qpair_alloc(qpair, handle, produce_size,
+                                      consume_size,
+                                      peer, flags,
+                                      VMCI_PRIVILEGE_FLAG_TRUSTED);
+               if (err != VMCI_ERROR_NO_ACCESS)
+                       goto out;
+
+       }
+
+       err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
+                              peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
+out:
+       if (err < 0) {
+               pr_err("Could not attach to queue pair with %d\n",
+                      err);
+               err = vmci_transport_error_to_vsock_error(err);
+       }
+
+       return err;
+}
+
+static int
+vmci_transport_datagram_create_hnd(u32 resource_id,
+                                  u32 flags,
+                                  vmci_datagram_recv_cb recv_cb,
+                                  void *client_data,
+                                  struct vmci_handle *out_handle)
+{
+       int err = 0;
+
+       /* Try to allocate our datagram handler as trusted. This will only work
+        * if vsock is running in the host.
+        */
+
+       err = vmci_datagram_create_handle_priv(resource_id, flags,
+                                              VMCI_PRIVILEGE_FLAG_TRUSTED,
+                                              recv_cb,
+                                              client_data, out_handle);
+
+       if (err == VMCI_ERROR_NO_ACCESS)
+               err = vmci_datagram_create_handle(resource_id, flags,
+                                                 recv_cb, client_data,
+                                                 out_handle);
+
+       return err;
+}
+
+/* This is invoked as part of a tasklet that's scheduled when the VMCI
+ * interrupt fires.  This is run in bottom-half context and if it ever needs to
+ * sleep it should defer that work to a work queue.
+ */
+
+static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
+{
+       struct sock *sk;
+       size_t size;
+       struct sk_buff *skb;
+       struct vsock_sock *vsk;
+
+       sk = (struct sock *)data;
+
+       /* This handler is privileged when this module is running on the host.
+        * We will get datagrams from all endpoints (even VMs that are in a
+        * restricted context). If we get one from a restricted context then
+        * the destination socket must be trusted.
+        *
+        * NOTE: We access the socket struct without holding the lock here.
+        * This is ok because the field we are interested in is never modified
+        * outside of the create and destruct socket functions.
+        */
+       vsk = vsock_sk(sk);
+       if (!vmci_transport_allow_dgram(vsk, dg->src.context))
+               return VMCI_ERROR_NO_ACCESS;
+
+       size = VMCI_DG_SIZE(dg);
+
+       /* Attach the packet to the socket's receive queue as an sk_buff. */
+       skb = alloc_skb(size, GFP_ATOMIC);
+       if (skb) {
+               /* sk_receive_skb() will do a sock_put(), so hold here. */
+               sock_hold(sk);
+               skb_put(skb, size);
+               memcpy(skb->data, dg, size);
+               sk_receive_skb(sk, skb, 0);
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/* Return false for context ids that can never host vsock stream endpoints
+ * (the hypervisor and reserved contexts), true for everything else.  The
+ * port argument is currently unused.
+ */
+static bool vmci_transport_stream_allow(u32 cid, u32 port)
+{
+       static const u32 non_socket_contexts[] = {
+               VMADDR_CID_HYPERVISOR,
+               VMADDR_CID_RESERVED,
+       };
+       int i;
+
+       /* The table entries must be comparable with a cid. */
+       BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));
+
+       for (i = ARRAY_SIZE(non_socket_contexts) - 1; i >= 0; i--) {
+               if (non_socket_contexts[i] == cid)
+                       return false;
+       }
+
+       return true;
+}
+
+/* Stream control-packet receive callback.  This is invoked as part of a
+ * tasklet that's scheduled when the VMCI interrupt fires.  This is run in
+ * bottom-half context but it defers most of its work to the packet handling
+ * work queue; only read/write notifications are fast-pathed here.
+ */
+
+static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
+{
+       struct sock *sk;
+       struct sockaddr_vm dst;
+       struct sockaddr_vm src;
+       struct vmci_transport_packet *pkt;
+       struct vsock_sock *vsk;
+       bool bh_process_pkt;
+       int err;
+
+       sk = NULL;
+       err = VMCI_SUCCESS;
+       bh_process_pkt = false;
+
+       /* Ignore incoming packets from contexts without sockets, or resources
+        * that aren't vsock implementations.
+        */
+
+       if (!vmci_transport_stream_allow(dg->src.context, -1)
+           || VMCI_TRANSPORT_PACKET_RID != dg->src.resource)
+               return VMCI_ERROR_NO_ACCESS;
+
+       if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
+               /* Drop datagrams that do not contain full VSock packets. */
+               return VMCI_ERROR_INVALID_ARGS;
+
+       pkt = (struct vmci_transport_packet *)dg;
+
+       /* Find the socket that should handle this packet.  First we look for a
+        * connected socket and if there is none we look for a socket bound to
+        * the destination address.
+        */
+       vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
+       vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
+
+       sk = vsock_find_connected_socket(&src, &dst);
+       if (!sk) {
+               sk = vsock_find_bound_socket(&dst);
+               if (!sk) {
+                       /* We could not find a socket for this specified
+                        * address.  If this packet is a RST, we just drop it.
+                        * If it is another packet, we send a RST.  Note that
+                        * we do not send a RST reply to RSTs so that we do not
+                        * continually send RSTs between two endpoints.
+                        *
+                        * Note that since this is a reply, dst is src and src
+                        * is dst.
+                        */
+                       if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
+                               pr_err("unable to send reset\n");
+
+                       err = VMCI_ERROR_NOT_FOUND;
+                       goto out;
+               }
+       }
+
+       /* If the received packet type is beyond all types known to this
+        * implementation, reply with an invalid message.  Hopefully this will
+        * help when implementing backwards compatibility in the future.
+        */
+       if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
+               vmci_transport_send_invalid_bh(&dst, &src);
+               err = VMCI_ERROR_INVALID_ARGS;
+               goto out;
+       }
+
+       /* This handler is privileged when this module is running on the host.
+        * We will get datagram connect requests from all endpoints (even VMs
+        * that are in a restricted context). If we get one from a restricted
+        * context then the destination socket must be trusted.
+        *
+        * NOTE: We access the socket struct without holding the lock here.
+        * This is ok because the field we are interested in is never modified
+        * outside of the create and destruct socket functions.
+        */
+       vsk = vsock_sk(sk);
+       if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
+               err = VMCI_ERROR_NO_ACCESS;
+               goto out;
+       }
+
+       /* We do most everything in a work queue, but let's fast path the
+        * notification of reads and writes to help data transfer performance.
+        * We can only do this if there is no process context code executing
+        * for this socket since that may change the state.
+        */
+       bh_lock_sock(sk);
+
+       if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED)
+               vmci_trans(vsk)->notify_ops->handle_notify_pkt(
+                               sk, pkt, true, &dst, &src,
+                               &bh_process_pkt);
+
+       bh_unlock_sock(sk);
+
+       if (!bh_process_pkt) {
+               struct vmci_transport_recv_pkt_info *recv_pkt_info;
+
+               /* The packet is copied so the datagram buffer can be reused
+                * once this callback returns.
+                */
+               recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
+               if (!recv_pkt_info) {
+                       if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
+                               pr_err("unable to send reset\n");
+
+                       err = VMCI_ERROR_NO_MEM;
+                       goto out;
+               }
+
+               recv_pkt_info->sk = sk;
+               memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
+               INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);
+
+               schedule_work(&recv_pkt_info->work);
+               /* Clear sk so that the reference count incremented by one of
+                * the Find functions above is not decremented below.  We need
+                * that reference count for the packet handler we've scheduled
+                * to run.
+                */
+               sk = NULL;
+       }
+
+out:
+       if (sk)
+               sock_put(sk);
+
+       return err;
+}
+
+/* VMCI_EVENT_QP_PEER_ATTACH callback.  client_data is the socket that was
+ * passed to vmci_event_subscribe().  Currently this only matches the event's
+ * queue pair handle against the socket's and takes no further action.
+ */
+static void vmci_transport_peer_attach_cb(u32 sub_id,
+                                         const struct vmci_event_data *e_data,
+                                         void *client_data)
+{
+       struct sock *sk = client_data;
+       const struct vmci_event_payload_qp *e_payload;
+       struct vsock_sock *vsk;
+
+       e_payload = vmci_event_data_const_payload(e_data);
+
+       vsk = vsock_sk(sk);
+
+       /* We don't ask for delayed CBs when we subscribe to this event (we
+        * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
+        * guarantees in that case about what context we might be running in,
+        * so it could be BH or process, blockable or non-blockable.  So we
+        * need to account for all possible contexts here.
+        */
+       local_bh_disable();
+       bh_lock_sock(sk);
+
+       /* XXX This is lame, we should provide a way to lookup sockets by
+        * qp_handle.
+        */
+       if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
+                                e_payload->handle)) {
+               /* XXX This doesn't do anything, but in the future we may want
+                * to set a flag here to verify the attach really did occur and
+                * we weren't just sent a datagram claiming it was.
+                */
+               goto out;
+       }
+
+out:
+       bh_unlock_sock(sk);
+       local_bh_enable();
+}
+
+/* React to the peer detaching from our queue pair: mark the socket done,
+ * record full peer shutdown, and -- if there is no data left to consume --
+ * move the socket out of its connecting/connected state.  A detach while
+ * still connecting is treated like a reset (ECONNRESET).
+ * NOTE(review): the peer_detach_cb path calls this under bh_lock_sock();
+ * confirm the qp_resumed_cb path provides equivalent protection.
+ */
+static void vmci_transport_handle_detach(struct sock *sk)
+{
+       struct vsock_sock *vsk;
+
+       vsk = vsock_sk(sk);
+       if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
+               sock_set_flag(sk, SOCK_DONE);
+
+               /* On a detach the peer will not be sending or receiving
+                * anymore.
+                */
+               vsk->peer_shutdown = SHUTDOWN_MASK;
+
+               /* We should not be sending anymore since the peer won't be
+                * there to receive, but we can still receive if there is data
+                * left in our consume queue.
+                */
+               if (vsock_stream_has_data(vsk) <= 0) {
+                       if (sk->sk_state == SS_CONNECTING) {
+                               /* The peer may detach from a queue pair while
+                                * we are still in the connecting state, i.e.,
+                                * if the peer VM is killed after attaching to
+                                * a queue pair, but before we complete the
+                                * handshake. In that case, we treat the detach
+                                * event like a reset.
+                                */
+
+                               sk->sk_state = SS_UNCONNECTED;
+                               sk->sk_err = ECONNRESET;
+                               sk->sk_error_report(sk);
+                               return;
+                       }
+                       sk->sk_state = SS_UNCONNECTED;
+               }
+               sk->sk_state_change(sk);
+       }
+}
+
+/* VMCI_EVENT_QP_PEER_DETACH callback.  client_data is the socket passed to
+ * vmci_event_subscribe().  If the detached queue pair is the one this socket
+ * is using, hand off to vmci_transport_handle_detach() under the BH lock.
+ */
+static void vmci_transport_peer_detach_cb(u32 sub_id,
+                                         const struct vmci_event_data *e_data,
+                                         void *client_data)
+{
+       struct sock *sk = client_data;
+       const struct vmci_event_payload_qp *e_payload;
+       struct vsock_sock *vsk;
+
+       e_payload = vmci_event_data_const_payload(e_data);
+       vsk = vsock_sk(sk);
+       if (vmci_handle_is_invalid(e_payload->handle))
+               return;
+
+       /* Same rules for locking as for peer_attach_cb(). */
+       local_bh_disable();
+       bh_lock_sock(sk);
+
+       /* XXX This is lame, we should provide a way to lookup sockets by
+        * qp_handle.
+        */
+       if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
+                                e_payload->handle))
+               vmci_transport_handle_detach(sk);
+
+       bh_unlock_sock(sk);
+       local_bh_enable();
+}
+
+/* VMCI_EVENT_QP_RESUMED callback: run the detach handler over every connected
+ * socket.  Presumably existing queue pairs are no longer usable after the VM
+ * resumes -- TODO(review): confirm against the VMCI queue pair semantics.
+ */
+static void vmci_transport_qp_resumed_cb(u32 sub_id,
+                                        const struct vmci_event_data *e_data,
+                                        void *client_data)
+{
+       vsock_for_each_connected_socket(vmci_transport_handle_detach);
+}
+
+/* Work-queue handler scheduled by vmci_transport_recv_stream_cb().  Runs in
+ * process context, takes the socket lock, and dispatches the saved packet
+ * based on the socket's current state.  Frees the recv_pkt_info and drops
+ * the socket reference taken in the stream callback before returning.
+ */
+static void vmci_transport_recv_pkt_work(struct work_struct *work)
+{
+       struct vmci_transport_recv_pkt_info *recv_pkt_info;
+       struct vmci_transport_packet *pkt;
+       struct sock *sk;
+
+       recv_pkt_info =
+               container_of(work, struct vmci_transport_recv_pkt_info, work);
+       sk = recv_pkt_info->sk;
+       pkt = &recv_pkt_info->pkt;
+
+       lock_sock(sk);
+
+       switch (sk->sk_state) {
+       case SS_LISTEN:
+               vmci_transport_recv_listen(sk, pkt);
+               break;
+       case SS_CONNECTING:
+               /* Processing of pending connections for servers goes through
+                * the listening socket, so see vmci_transport_recv_listen()
+                * for that path.
+                */
+               vmci_transport_recv_connecting_client(sk, pkt);
+               break;
+       case SS_CONNECTED:
+               vmci_transport_recv_connected(sk, pkt);
+               break;
+       default:
+               /* Because this function does not run in the same context as
+                * vmci_transport_recv_stream_cb it is possible that the
+                * socket has closed. We need to let the other side know or it
+                * could be sitting in a connect and hang forever. Send a
+                * reset to prevent that.
+                */
+               vmci_transport_send_reset(sk, pkt);
+               goto out;
+       }
+
+out:
+       release_sock(sk);
+       kfree(recv_pkt_info);
+       /* Release reference obtained in the stream callback when we fetched
+        * this socket out of the bound or connected list.
+        */
+       sock_put(sk);
+}
+
+/* Process a packet received on a listening socket.  Either route it to the
+ * matching pending (in-progress) connection, or service a new connection
+ * request: create a child socket, pick a queue pair size and protocol
+ * version, and reply with a NEGOTIATE/NEGOTIATE2.  Called from the recv
+ * work queue with the listener's socket lock held.
+ */
+static int vmci_transport_recv_listen(struct sock *sk,
+                                     struct vmci_transport_packet *pkt)
+{
+       struct sock *pending;
+       struct vsock_sock *vpending;
+       int err;
+       u64 qp_size;
+       bool old_request = false;
+       bool old_pkt_proto = false;
+
+       err = 0;
+
+       /* Because we are in the listen state, we could be receiving a packet
+        * for ourself or any previous connection requests that we received.
+        * If it's the latter, we try to find a socket in our list of pending
+        * connections and, if we do, call the appropriate handler for the
+        * state that that socket is in.  Otherwise we try to service the
+        * connection request.
+        */
+       pending = vmci_transport_get_pending(sk, pkt);
+       if (pending) {
+               lock_sock(pending);
+               switch (pending->sk_state) {
+               case SS_CONNECTING:
+                       err = vmci_transport_recv_connecting_server(sk,
+                                                                   pending,
+                                                                   pkt);
+                       break;
+               default:
+                       vmci_transport_send_reset(pending, pkt);
+                       err = -EINVAL;
+               }
+
+               if (err < 0)
+                       vsock_remove_pending(sk, pending);
+
+               release_sock(pending);
+               vmci_transport_release_pending(pending);
+
+               return err;
+       }
+
+       /* The listen state only accepts connection requests.  Reply with a
+        * reset unless we received a reset.
+        */
+
+       if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
+             pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
+               vmci_transport_reply_reset(pkt);
+               return -EINVAL;
+       }
+
+       /* A zero proposed queue pair size is invalid. */
+       if (pkt->u.size == 0) {
+               vmci_transport_reply_reset(pkt);
+               return -EINVAL;
+       }
+
+       /* If this socket can't accommodate this connection request, we send a
+        * reset.  Otherwise we create and initialize a child socket and reply
+        * with a connection negotiation.
+        */
+       if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
+               vmci_transport_reply_reset(pkt);
+               return -ECONNREFUSED;
+       }
+
+       pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
+                                sk->sk_type);
+       if (!pending) {
+               vmci_transport_send_reset(sk, pkt);
+               return -ENOMEM;
+       }
+
+       vpending = vsock_sk(pending);
+
+       /* The child's addresses come from the packet: our side is the
+        * packet's destination, the peer is its source.
+        */
+       vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
+                       pkt->dst_port);
+       vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
+                       pkt->src_port);
+
+       /* If the proposed size fits within our min/max, accept it. Otherwise
+        * propose our own size.
+        */
+       if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
+           pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
+               qp_size = pkt->u.size;
+       } else {
+               qp_size = vmci_trans(vpending)->queue_pair_size;
+       }
+
+       /* Figure out if we are using old or new requests based on the
+        * overrides pkt types sent by our peer.
+        */
+       if (vmci_transport_old_proto_override(&old_pkt_proto)) {
+               old_request = old_pkt_proto;
+       } else {
+               if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
+                       old_request = true;
+               else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
+                       old_request = false;
+
+       }
+
+       if (old_request) {
+               /* Handle a REQUEST (or override) */
+               u16 version = VSOCK_PROTO_INVALID;
+               if (vmci_transport_proto_to_notify_struct(
+                       pending, &version, true))
+                       err = vmci_transport_send_negotiate(pending, qp_size);
+               else
+                       err = -EINVAL;
+
+       } else {
+               /* Handle a REQUEST2 (or override) */
+               int proto_int = pkt->proto;
+               int pos;
+               u16 active_proto_version = 0;
+
+               /* The list of possible protocols is the intersection of all
+                * protocols the client supports ... plus all the protocols we
+                * support.
+                */
+               proto_int &= vmci_transport_new_proto_supported_versions();
+
+               /* We choose the highest possible protocol version and use that
+                * one.
+                */
+               pos = fls(proto_int);
+               if (pos) {
+                       active_proto_version = (1 << (pos - 1));
+                       if (vmci_transport_proto_to_notify_struct(
+                               pending, &active_proto_version, false))
+                               err = vmci_transport_send_negotiate2(pending,
+                                                       qp_size,
+                                                       active_proto_version);
+                       else
+                               err = -EINVAL;
+
+               } else {
+                       err = -EINVAL;
+               }
+       }
+
+       if (err < 0) {
+               vmci_transport_send_reset(sk, pkt);
+               sock_put(pending);
+               err = vmci_transport_error_to_vsock_error(err);
+               goto out;
+       }
+
+       vsock_add_pending(sk, pending);
+       sk->sk_ack_backlog++;
+
+       pending->sk_state = SS_CONNECTING;
+       vmci_trans(vpending)->produce_size =
+               vmci_trans(vpending)->consume_size = qp_size;
+       vmci_trans(vpending)->queue_pair_size = qp_size;
+
+       vmci_trans(vpending)->notify_ops->process_request(pending);
+
+       /* We might never receive another message for this socket and it's not
+        * connected to any process, so we have to ensure it gets cleaned up
+        * ourself.  Our delayed work function will take care of that.  Note
+        * that we do not ever cancel this function since we have few
+        * guarantees about its state when calling cancel_delayed_work().
+        * Instead we hold a reference on the socket for that function and make
+        * it capable of handling cases where it needs to do nothing but
+        * release that reference.
+        */
+       vpending->listener = sk;
+       sock_hold(sk);
+       sock_hold(pending);
+       INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
+       schedule_delayed_work(&vpending->dwork, HZ);
+
+out:
+       return err;
+}
+
+/* Server side of the connect handshake: handle a packet addressed to a
+ * pending socket spawned by the listener.  On a valid OFFER we subscribe to
+ * peer-detach events, attach to the offered queue pair, move the socket to
+ * SS_CONNECTED and send an ATTACH back; on any failure the connection is
+ * reset and the error recorded on the pending socket.
+ */
+static int
+vmci_transport_recv_connecting_server(struct sock *listener,
+                                     struct sock *pending,
+                                     struct vmci_transport_packet *pkt)
+{
+       struct vsock_sock *vpending;
+       struct vmci_handle handle;
+       struct vmci_qp *qpair;
+       bool is_local;
+       u32 flags;
+       u32 detach_sub_id;
+       int err;
+       int skerr;
+
+       vpending = vsock_sk(pending);
+       detach_sub_id = VMCI_INVALID_ID;
+
+       switch (pkt->type) {
+       case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
+               if (vmci_handle_is_invalid(pkt->u.handle)) {
+                       vmci_transport_send_reset(pending, pkt);
+                       skerr = EPROTO;
+                       err = -EINVAL;
+                       goto destroy;
+               }
+               break;
+       default:
+               /* Close and cleanup the connection. */
+               vmci_transport_send_reset(pending, pkt);
+               skerr = EPROTO;
+               err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
+               goto destroy;
+       }
+
+       /* In order to complete the connection we need to attach to the offered
+        * queue pair and send an attach notification.  We also subscribe to the
+        * detach event so we know when our peer goes away, and we do that
+        * before attaching so we don't miss an event.  If all this succeeds,
+        * we update our state and wakeup anything waiting in accept() for a
+        * connection.
+        */
+
+       /* We don't care about attach since we ensure the other side has
+        * attached by specifying the ATTACH_ONLY flag below.
+        */
+       err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
+                                  vmci_transport_peer_detach_cb,
+                                  pending, &detach_sub_id);
+       if (err < VMCI_SUCCESS) {
+               vmci_transport_send_reset(pending, pkt);
+               err = vmci_transport_error_to_vsock_error(err);
+               skerr = -err;
+               goto destroy;
+       }
+
+       vmci_trans(vpending)->detach_sub_id = detach_sub_id;
+
+       /* Now attach to the queue pair the client created. */
+       handle = pkt->u.handle;
+
+       /* vpending->local_addr always has a context id so we do not need to
+        * worry about VMADDR_CID_ANY in this case.
+        */
+       is_local =
+           vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
+       flags = VMCI_QPFLAG_ATTACH_ONLY;
+       flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;
+
+       err = vmci_transport_queue_pair_alloc(
+                                       &qpair,
+                                       &handle,
+                                       vmci_trans(vpending)->produce_size,
+                                       vmci_trans(vpending)->consume_size,
+                                       pkt->dg.src.context,
+                                       flags,
+                                       vmci_transport_is_trusted(
+                                               vpending,
+                                               vpending->remote_addr.svm_cid));
+       if (err < 0) {
+               vmci_transport_send_reset(pending, pkt);
+               skerr = -err;
+               goto destroy;
+       }
+
+       vmci_trans(vpending)->qp_handle = handle;
+       vmci_trans(vpending)->qpair = qpair;
+
+       /* When we send the attach message, we must be ready to handle incoming
+        * control messages on the newly connected socket. So we move the
+        * pending socket to the connected state before sending the attach
+        * message. Otherwise, an incoming packet triggered by the attach being
+        * received by the peer may be processed concurrently with what happens
+        * below after sending the attach message, and that incoming packet
+        * will find the listening socket instead of the (currently) pending
+        * socket. Note that enqueueing the socket increments the reference
+        * count, so even if a reset comes before the connection is accepted,
+        * the socket will be valid until it is removed from the queue.
+        *
+        * If we fail sending the attach below, we remove the socket from the
+        * connected list and move the socket to SS_UNCONNECTED before
+        * releasing the lock, so a pending slow path processing of an incoming
+        * packet will not see the socket in the connected state in that case.
+        */
+       pending->sk_state = SS_CONNECTED;
+
+       vsock_insert_connected(vpending);
+
+       /* Notify our peer of our attach. */
+       err = vmci_transport_send_attach(pending, handle);
+       if (err < 0) {
+               vsock_remove_connected(vpending);
+               pr_err("Could not send attach\n");
+               vmci_transport_send_reset(pending, pkt);
+               err = vmci_transport_error_to_vsock_error(err);
+               skerr = -err;
+               goto destroy;
+       }
+
+       /* We have a connection. Move the now connected socket from the
+        * listener's pending list to the accept queue so callers of accept()
+        * can find it.
+        */
+       vsock_remove_pending(listener, pending);
+       vsock_enqueue_accept(listener, pending);
+
+       /* Callers of accept() will be waiting on the listening socket, not
+        * the pending socket.
+        */
+       listener->sk_state_change(listener);
+
+       return 0;
+
+destroy:
+       pending->sk_err = skerr;
+       pending->sk_state = SS_UNCONNECTED;
+       /* As long as we drop our reference, all necessary cleanup will handle
+        * when the cleanup function drops its reference and our destruct
+        * implementation is called.  Note that since the listen handler will
+        * remove pending from the pending list upon our failure, the cleanup
+        * function won't drop the additional reference, which is why we do it
+        * here.
+        */
+       sock_put(pending);
+
+       return err;
+}
+
+/* Client side of the connect handshake: dispatch a packet received while
+ * this socket is in SS_CONNECTING.  A valid ATTACH completes the connection;
+ * NEGOTIATE/NEGOTIATE2 and INVALID continue the handshake; anything
+ * unexpected resets the connection and reports skerr on the socket.
+ */
+static int
+vmci_transport_recv_connecting_client(struct sock *sk,
+                                     struct vmci_transport_packet *pkt)
+{
+       struct vsock_sock *vsk;
+       int err;
+       int skerr;
+
+       vsk = vsock_sk(sk);
+
+       switch (pkt->type) {
+       case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
+               if (vmci_handle_is_invalid(pkt->u.handle) ||
+                   !vmci_handle_is_equal(pkt->u.handle,
+                                         vmci_trans(vsk)->qp_handle)) {
+                       skerr = EPROTO;
+                       err = -EINVAL;
+                       goto destroy;
+               }
+
+               /* Signify the socket is connected and wakeup the waiter in
+                * connect(). Also place the socket in the connected table for
+                * accounting (it can already be found since it's in the bound
+                * table).
+                */
+               sk->sk_state = SS_CONNECTED;
+               sk->sk_socket->state = SS_CONNECTED;
+               vsock_insert_connected(vsk);
+               sk->sk_state_change(sk);
+
+               break;
+       case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
+       case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
+               /* A negotiate reply is only acceptable if it comes from the
+                * expected peer, carries a non-zero size, and we have not yet
+                * set up any queue pair or event subscriptions.
+                */
+               if (pkt->u.size == 0
+                   || pkt->dg.src.context != vsk->remote_addr.svm_cid
+                   || pkt->src_port != vsk->remote_addr.svm_port
+                   || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
+                   || vmci_trans(vsk)->qpair
+                   || vmci_trans(vsk)->produce_size != 0
+                   || vmci_trans(vsk)->consume_size != 0
+                   || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
+                   || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
+                       skerr = EPROTO;
+                       err = -EINVAL;
+
+                       goto destroy;
+               }
+
+               err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
+               if (err) {
+                       skerr = -err;
+                       goto destroy;
+               }
+
+               break;
+       case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
+               err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
+               if (err) {
+                       skerr = -err;
+                       goto destroy;
+               }
+
+               break;
+       case VMCI_TRANSPORT_PACKET_TYPE_RST:
+               /* Older versions of the linux code (WS 6.5 / ESX 4.0) used to
+                * continue processing here after they sent an INVALID packet.
+                * This meant that we got a RST after the INVALID. We ignore a
+                * RST after an INVALID. The common code doesn't send the RST
+                * ... so we can hang if an old version of the common code
+                * fails between getting a REQUEST and sending an OFFER back.
+                * Not much we can do about it... except hope that it doesn't
+                * happen.
+                */
+               if (vsk->ignore_connecting_rst) {
+                       vsk->ignore_connecting_rst = false;
+               } else {
+                       skerr = ECONNRESET;
+                       err = 0;
+                       goto destroy;
+               }
+
+               break;
+       default:
+               /* Close and cleanup the connection. */
+               skerr = EPROTO;
+               err = -EINVAL;
+               goto destroy;
+       }
+
+       return 0;
+
+destroy:
+       vmci_transport_send_reset(sk, pkt);
+
+       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_err = skerr;
+       sk->sk_error_report(sk);
+       return err;
+}
+
+/* Handle the server's NEGOTIATE/NEGOTIATE2 reply on the client: validate the
+ * proposed queue pair size, select the notify protocol version, subscribe to
+ * attach/detach events, allocate the queue pair and send a QP offer.  On any
+ * failure every resource acquired so far is torn down via the destroy label.
+ */
+static int vmci_transport_recv_connecting_client_negotiate(
+                                       struct sock *sk,
+                                       struct vmci_transport_packet *pkt)
+{
+       int err;
+       struct vsock_sock *vsk;
+       struct vmci_handle handle;
+       struct vmci_qp *qpair;
+       u32 attach_sub_id;
+       u32 detach_sub_id;
+       bool is_local;
+       u32 flags;
+       bool old_proto = true;
+       bool old_pkt_proto;
+       u16 version;
+
+       vsk = vsock_sk(sk);
+       handle = VMCI_INVALID_HANDLE;
+       attach_sub_id = VMCI_INVALID_ID;
+       detach_sub_id = VMCI_INVALID_ID;
+
+       /* If we have gotten here then we should be past the point where old
+        * linux vsock could have sent the bogus rst.
+        */
+       vsk->sent_request = false;
+       vsk->ignore_connecting_rst = false;
+
+       /* Verify that we're OK with the proposed queue pair size */
+       if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
+           pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
+               err = -EINVAL;
+               goto destroy;
+       }
+
+       /* At this point we know the CID the peer is using to talk to us. */
+
+       if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
+               vsk->local_addr.svm_cid = pkt->dg.dst.context;
+
+       /* Setup the notify ops to be the highest supported version that both
+        * the server and the client support.
+        */
+
+       if (vmci_transport_old_proto_override(&old_pkt_proto)) {
+               old_proto = old_pkt_proto;
+       } else {
+               if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
+                       old_proto = true;
+               else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
+                       old_proto = false;
+
+       }
+
+       if (old_proto)
+               version = VSOCK_PROTO_INVALID;
+       else
+               version = pkt->proto;
+
+       if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
+               err = -EINVAL;
+               goto destroy;
+       }
+
+       /* Subscribe to attach and detach events first.
+        *
+        * XXX We attach once for each queue pair created for now so it is easy
+        * to find the socket (it's provided), but later we should only
+        * subscribe once and add a way to lookup sockets by queue pair handle.
+        */
+       err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
+                                  vmci_transport_peer_attach_cb,
+                                  sk, &attach_sub_id);
+       if (err < VMCI_SUCCESS) {
+               err = vmci_transport_error_to_vsock_error(err);
+               goto destroy;
+       }
+
+       err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
+                                  vmci_transport_peer_detach_cb,
+                                  sk, &detach_sub_id);
+       if (err < VMCI_SUCCESS) {
+               err = vmci_transport_error_to_vsock_error(err);
+               goto destroy;
+       }
+
+       /* Make VMCI select the handle for us. */
+       handle = VMCI_INVALID_HANDLE;
+       is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
+       flags = is_local ? VMCI_QPFLAG_LOCAL : 0;
+
+       err = vmci_transport_queue_pair_alloc(&qpair,
+                                             &handle,
+                                             pkt->u.size,
+                                             pkt->u.size,
+                                             vsk->remote_addr.svm_cid,
+                                             flags,
+                                             vmci_transport_is_trusted(
+                                                 vsk,
+                                                 vsk->
+                                                 remote_addr.svm_cid));
+       if (err < 0)
+               goto destroy;
+
+       err = vmci_transport_send_qp_offer(sk, handle);
+       if (err < 0) {
+               err = vmci_transport_error_to_vsock_error(err);
+               goto destroy;
+       }
+
+       vmci_trans(vsk)->qp_handle = handle;
+       vmci_trans(vsk)->qpair = qpair;
+
+       vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
+               pkt->u.size;
+
+       vmci_trans(vsk)->attach_sub_id = attach_sub_id;
+       vmci_trans(vsk)->detach_sub_id = detach_sub_id;
+
+       vmci_trans(vsk)->notify_ops->process_negotiate(sk);
+
+       return 0;
+
+destroy:
+       /* Unwind whichever of the subscriptions and the queue pair were
+        * acquired before the failure.
+        */
+       if (attach_sub_id != VMCI_INVALID_ID)
+               vmci_event_unsubscribe(attach_sub_id);
+
+       if (detach_sub_id != VMCI_INVALID_ID)
+               vmci_event_unsubscribe(detach_sub_id);
+
+       if (!vmci_handle_is_invalid(handle))
+               vmci_qpair_detach(&qpair);
+
+       return err;
+}
+
+/* Handle an unexpected packet while a client socket is still connecting.
+ * If we had a new-style connection request outstanding (vsk->sent_request),
+ * fall back by re-sending a plain (old-protocol) connection request and arm
+ * ignore_connecting_rst so the RST that triggered this fallback does not
+ * tear the socket down.  Returns 0 or a negative vsock error.
+ */
+static int
+vmci_transport_recv_connecting_client_invalid(struct sock *sk,
+                                             struct vmci_transport_packet *pkt)
+{
+       int err = 0;
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       if (vsk->sent_request) {
+               vsk->sent_request = false;
+               vsk->ignore_connecting_rst = true;
+
+               err = vmci_transport_send_conn_request(
+                       sk, vmci_trans(vsk)->queue_pair_size);
+               if (err < 0)
+                       err = vmci_transport_error_to_vsock_error(err);
+               else
+                       err = 0;
+
+       }
+
+       return err;
+}
+
+/* Process a control packet received on a connected stream socket.
+ * SHUTDOWN accumulates the peer's shutdown mode and wakes waiters; RST is
+ * treated as a clean shutdown (queued data may still be drained first);
+ * anything else is delegated to the negotiated notify protocol handler.
+ * Returns 0, or -EINVAL if the notify handler did not recognize the packet.
+ */
+static int vmci_transport_recv_connected(struct sock *sk,
+                                        struct vmci_transport_packet *pkt)
+{
+       struct vsock_sock *vsk;
+       bool pkt_processed = false;
+
+       /* In cases where we are closing the connection, it's sufficient to
+        * mark the state change (and maybe error) and wake up any waiting
+        * threads. Since this is a connected socket, it's owned by a user
+        * process and will be cleaned up when the failure is passed back on
+        * the current or next system call.  Our system call implementations
+        * must therefore check for error and state changes on entry and when
+        * being awoken.
+        */
+       switch (pkt->type) {
+       case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
+               if (pkt->u.mode) {
+                       vsk = vsock_sk(sk);
+
+                       vsk->peer_shutdown |= pkt->u.mode;
+                       sk->sk_state_change(sk);
+               }
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_RST:
+               vsk = vsock_sk(sk);
+               /* It is possible that we sent our peer a message (e.g a
+                * WAITING_READ) right before we got notified that the peer had
+                * detached. If that happens then we can get a RST pkt back
+                * from our peer even though there is data available for us to
+                * read. In that case, don't shutdown the socket completely but
+                * instead allow the local client to finish reading data off
+                * the queuepair. Always treat a RST pkt in connected mode like
+                * a clean shutdown.
+                */
+               sock_set_flag(sk, SOCK_DONE);
+               vsk->peer_shutdown = SHUTDOWN_MASK;
+               if (vsock_stream_has_data(vsk) <= 0)
+                       sk->sk_state = SS_DISCONNECTING;
+
+               sk->sk_state_change(sk);
+               break;
+
+       default:
+               vsk = vsock_sk(sk);
+               vmci_trans(vsk)->notify_ops->handle_notify_pkt(
+                               sk, pkt, false, NULL, NULL,
+                               &pkt_processed);
+               if (!pkt_processed)
+                       return -EINVAL;
+
+               break;
+       }
+
+       return 0;
+}
+
+/* Allocate and initialize the per-socket VMCI transport state (vsk->trans).
+ * All handles/IDs start invalid and notify_ops unset; a child socket
+ * (psk != NULL) inherits its parent's queue pair size limits, otherwise the
+ * module defaults apply.  Returns 0 on success or -ENOMEM.
+ */
+static int vmci_transport_socket_init(struct vsock_sock *vsk,
+                                     struct vsock_sock *psk)
+{
+       vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
+       if (!vsk->trans)
+               return -ENOMEM;
+
+       vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
+       vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
+       vmci_trans(vsk)->qpair = NULL;
+       vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
+       vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
+               VMCI_INVALID_ID;
+       vmci_trans(vsk)->notify_ops = NULL;
+       if (psk) {
+               vmci_trans(vsk)->queue_pair_size =
+                       vmci_trans(psk)->queue_pair_size;
+               vmci_trans(vsk)->queue_pair_min_size =
+                       vmci_trans(psk)->queue_pair_min_size;
+               vmci_trans(vsk)->queue_pair_max_size =
+                       vmci_trans(psk)->queue_pair_max_size;
+       } else {
+               vmci_trans(vsk)->queue_pair_size =
+                       VMCI_TRANSPORT_DEFAULT_QP_SIZE;
+               vmci_trans(vsk)->queue_pair_min_size =
+                        VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
+               vmci_trans(vsk)->queue_pair_max_size =
+                       VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
+       }
+
+       return 0;
+}
+
+/* Undo vmci_transport_socket_init(): unsubscribe from attach/detach events,
+ * detach the queue pair, run the notify protocol's destructor, and free
+ * vsk->trans.  Each step is guarded by an "is valid" check, so this is safe
+ * on a partially initialized socket.
+ */
+static void vmci_transport_destruct(struct vsock_sock *vsk)
+{
+       if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
+               vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
+               vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
+       }
+
+       if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
+               vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
+               vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
+       }
+
+       if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
+               vmci_qpair_detach(&vmci_trans(vsk)->qpair);
+               vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
+               vmci_trans(vsk)->produce_size = 0;
+               vmci_trans(vsk)->consume_size = 0;
+       }
+
+       if (vmci_trans(vsk)->notify_ops)
+               vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
+
+       kfree(vsk->trans);
+       vsk->trans = NULL;
+}
+
+/* Release hook: destroy the datagram handle, if one was bound.  Stream-side
+ * state (queue pair, subscriptions) is torn down in vmci_transport_destruct().
+ */
+static void vmci_transport_release(struct vsock_sock *vsk)
+{
+       if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
+               vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
+               vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
+       }
+}
+
+/* Bind a datagram socket: create a VMCI datagram handle whose resource ID
+ * serves as the local port (VMCI picks one when the caller used
+ * VMADDR_PORT_ANY).  Binding to a reserved port (<= LAST_RESERVED_PORT)
+ * requires CAP_NET_BIND_SERVICE.  On success the socket's local address is
+ * initialized from the chosen handle.
+ */
+static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
+                                    struct sockaddr_vm *addr)
+{
+       u32 port;
+       u32 flags;
+       int err;
+
+       /* VMCI will select a resource ID for us if we provide
+        * VMCI_INVALID_ID.
+        */
+       port = addr->svm_port == VMADDR_PORT_ANY ?
+                       VMCI_INVALID_ID : addr->svm_port;
+
+       if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
+               return -EACCES;
+
+       flags = addr->svm_cid == VMADDR_CID_ANY ?
+                               VMCI_FLAG_ANYCID_DG_HND : 0;
+
+       err = vmci_transport_datagram_create_hnd(port, flags,
+                                                vmci_transport_recv_dgram_cb,
+                                                &vsk->sk,
+                                                &vmci_trans(vsk)->dg_handle);
+       if (err < VMCI_SUCCESS)
+               return vmci_transport_error_to_vsock_error(err);
+       vsock_addr_init(&vsk->local_addr, addr->svm_cid,
+                       vmci_trans(vsk)->dg_handle.resource);
+
+       return 0;
+}
+
+/* Send one datagram to remote_addr.  Builds a VMCI datagram (header followed
+ * by the caller's payload), hands it to VMCI, and returns the number of
+ * payload bytes sent (vmci_datagram_send() reports the full size including
+ * the header) or a negative vsock error.
+ */
+static int vmci_transport_dgram_enqueue(
+       struct vsock_sock *vsk,
+       struct sockaddr_vm *remote_addr,
+       struct iovec *iov,
+       size_t len)
+{
+       int err;
+       struct vmci_datagram *dg;
+
+       if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
+               return -EMSGSIZE;
+
+       if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
+               return -EPERM;
+
+       /* Allocate a buffer for the user's message and our packet header. */
+       dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
+       if (!dg)
+               return -ENOMEM;
+
+       /* memcpy_fromiovec() can fail (-EFAULT) on a bad user buffer;
+        * sending the datagram anyway would leak uninitialized kernel heap
+        * memory to the peer, so propagate the error instead.
+        */
+       err = memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len);
+       if (err) {
+               kfree(dg);
+               return err;
+       }
+
+       dg->dst = vmci_make_handle(remote_addr->svm_cid,
+                                  remote_addr->svm_port);
+       dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
+                                  vsk->local_addr.svm_port);
+       dg->payload_size = len;
+
+       err = vmci_datagram_send(dg);
+       kfree(dg);
+       if (err < 0)
+               return vmci_transport_error_to_vsock_error(err);
+
+       return err - sizeof(*dg);
+}
+
+/* Receive one datagram: pop the head sk_buff from the receive queue,
+ * validate that the claimed payload size matches the skb, copy the payload
+ * (truncating and setting MSG_TRUNC if the user buffer is smaller), and
+ * fill in msg_name with the sender's address when requested.  Returns the
+ * number of payload bytes copied or a negative error.
+ * NOTE(review): flags are passed straight to skb_recv_datagram(), so
+ * MSG_PEEK reaches it - confirm the unconditional skb_free_datagram()
+ * pairing is correct for the peek case.
+ */
+static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
+                                       struct vsock_sock *vsk,
+                                       struct msghdr *msg, size_t len,
+                                       int flags)
+{
+       int err;
+       int noblock;
+       struct vmci_datagram *dg;
+       size_t payload_len;
+       struct sk_buff *skb;
+
+       noblock = flags & MSG_DONTWAIT;
+
+       if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
+               return -EOPNOTSUPP;
+
+       /* Retrieve the head sk_buff from the socket's receive queue. */
+       err = 0;
+       skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
+       if (err)
+               return err;
+
+       if (!skb)
+               return -EAGAIN;
+
+       dg = (struct vmci_datagram *)skb->data;
+       if (!dg)
+               /* err is 0, meaning we read zero bytes. */
+               goto out;
+
+       payload_len = dg->payload_size;
+       /* Ensure the sk_buff matches the payload size claimed in the packet. */
+       if (payload_len != skb->len - sizeof(*dg)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (payload_len > len) {
+               payload_len = len;
+               msg->msg_flags |= MSG_TRUNC;
+       }
+
+       /* Place the datagram payload in the user's iovec. */
+       err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
+               payload_len);
+       if (err)
+               goto out;
+
+       msg->msg_namelen = 0;
+       if (msg->msg_name) {
+               struct sockaddr_vm *vm_addr;
+
+               /* Provide the address of the sender. */
+               vm_addr = (struct sockaddr_vm *)msg->msg_name;
+               vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
+               msg->msg_namelen = sizeof(*vm_addr);
+       }
+       err = payload_len;
+
+out:
+       skb_free_datagram(&vsk->sk, skb);
+       return err;
+}
+
+/* Policy check for datagram traffic to (cid, port): traffic aimed at the
+ * hypervisor context is restricted to the PBRPC registration port; all
+ * other destinations are allowed.
+ */
+static bool vmci_transport_dgram_allow(u32 cid, u32 port)
+{
+       if (cid == VMADDR_CID_HYPERVISOR) {
+               /* Registrations of PBRPC Servers do not modify VMX/Hypervisor
+                * state and are allowed.
+                */
+               return port == VMCI_UNITY_PBRPC_REGISTER;
+       }
+
+       return true;
+}
+
+/* Initiate a stream connection.  When the protocol override forces the old
+ * packet format, send a plain connection request; otherwise advertise every
+ * supported notify protocol version via a REQUEST2 and record that a
+ * new-style request is outstanding (vsk->sent_request).  On send failure
+ * the socket drops back to SS_UNCONNECTED.
+ */
+static int vmci_transport_connect(struct vsock_sock *vsk)
+{
+       int err;
+       bool old_pkt_proto = false;
+       struct sock *sk = &vsk->sk;
+
+       if (vmci_transport_old_proto_override(&old_pkt_proto) &&
+               old_pkt_proto) {
+               err = vmci_transport_send_conn_request(
+                       sk, vmci_trans(vsk)->queue_pair_size);
+               if (err < 0) {
+                       sk->sk_state = SS_UNCONNECTED;
+                       return err;
+               }
+       } else {
+               int supported_proto_versions =
+                       vmci_transport_new_proto_supported_versions();
+               err = vmci_transport_send_conn_request2(
+                               sk, vmci_trans(vsk)->queue_pair_size,
+                               supported_proto_versions);
+               if (err < 0) {
+                       sk->sk_state = SS_UNCONNECTED;
+                       return err;
+               }
+
+               vsk->sent_request = true;
+       }
+
+       return err;
+}
+
+/* Read stream data from the connected queue pair.  A MSG_PEEK read copies
+ * data without consuming it; a normal read dequeues it.
+ */
+static ssize_t vmci_transport_stream_dequeue(
+       struct vsock_sock *vsk,
+       struct iovec *iov,
+       size_t len,
+       int flags)
+{
+       struct vmci_qp *qp = vmci_trans(vsk)->qpair;
+
+       if (flags & MSG_PEEK)
+               return vmci_qpair_peekv(qp, iov, len, 0);
+
+       return vmci_qpair_dequev(qp, iov, len, 0);
+}
+
+/* Enqueue user data onto the connected stream's produce queue. */
+static ssize_t vmci_transport_stream_enqueue(
+       struct vsock_sock *vsk,
+       struct iovec *iov,
+       size_t len)
+{
+       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0);
+}
+
+/* Number of bytes ready to read from the consume queue. */
+static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
+{
+       return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
+}
+
+/* Free space available in the produce queue. */
+static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
+{
+       return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
+}
+
+/* Receive high-water mark: the size of our consume queue. */
+static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
+{
+       return vmci_trans(vsk)->consume_size;
+}
+
+/* The stream is active while a valid queue pair handle is attached. */
+static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
+{
+       return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
+}
+
+/* Accessors for the preferred/min/max queue pair sizes used when a stream's
+ * queue pair is allocated.  The setters keep the invariant
+ * min_size <= size <= max_size by clamping the other fields as needed.
+ */
+static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
+{
+       return vmci_trans(vsk)->queue_pair_size;
+}
+
+static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
+{
+       return vmci_trans(vsk)->queue_pair_min_size;
+}
+
+static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
+{
+       return vmci_trans(vsk)->queue_pair_max_size;
+}
+
+/* Set the preferred size, widening the min/max bounds to contain it. */
+static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+       if (val < vmci_trans(vsk)->queue_pair_min_size)
+               vmci_trans(vsk)->queue_pair_min_size = val;
+       if (val > vmci_trans(vsk)->queue_pair_max_size)
+               vmci_trans(vsk)->queue_pair_max_size = val;
+       vmci_trans(vsk)->queue_pair_size = val;
+}
+
+/* Raise the minimum; the preferred size is raised too if it fell below. */
+static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
+                                              u64 val)
+{
+       if (val > vmci_trans(vsk)->queue_pair_size)
+               vmci_trans(vsk)->queue_pair_size = val;
+       vmci_trans(vsk)->queue_pair_min_size = val;
+}
+
+/* Lower the maximum; the preferred size is lowered too if it exceeded it. */
+static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
+                                              u64 val)
+{
+       if (val < vmci_trans(vsk)->queue_pair_size)
+               vmci_trans(vsk)->queue_pair_size = val;
+       vmci_trans(vsk)->queue_pair_max_size = val;
+}
+
+/* The wrappers below adapt the generic vsock transport notification
+ * callbacks to the protocol-specific ops chosen at negotiation time
+ * (vmci_trans(vsk)->notify_ops).  NOTE(review): the casts between
+ * vsock_transport_*_notify_data and vmci_transport_*_notify_data assume
+ * the two structures are layout-compatible - confirm against af_vsock.h.
+ */
+static int vmci_transport_notify_poll_in(
+       struct vsock_sock *vsk,
+       size_t target,
+       bool *data_ready_now)
+{
+       return vmci_trans(vsk)->notify_ops->poll_in(
+                       &vsk->sk, target, data_ready_now);
+}
+
+static int vmci_transport_notify_poll_out(
+       struct vsock_sock *vsk,
+       size_t target,
+       bool *space_available_now)
+{
+       return vmci_trans(vsk)->notify_ops->poll_out(
+                       &vsk->sk, target, space_available_now);
+}
+
+static int vmci_transport_notify_recv_init(
+       struct vsock_sock *vsk,
+       size_t target,
+       struct vsock_transport_recv_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->recv_init(
+                       &vsk->sk, target,
+                       (struct vmci_transport_recv_notify_data *)data);
+}
+
+static int vmci_transport_notify_recv_pre_block(
+       struct vsock_sock *vsk,
+       size_t target,
+       struct vsock_transport_recv_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->recv_pre_block(
+                       &vsk->sk, target,
+                       (struct vmci_transport_recv_notify_data *)data);
+}
+
+static int vmci_transport_notify_recv_pre_dequeue(
+       struct vsock_sock *vsk,
+       size_t target,
+       struct vsock_transport_recv_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
+                       &vsk->sk, target,
+                       (struct vmci_transport_recv_notify_data *)data);
+}
+
+static int vmci_transport_notify_recv_post_dequeue(
+       struct vsock_sock *vsk,
+       size_t target,
+       ssize_t copied,
+       bool data_read,
+       struct vsock_transport_recv_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
+                       &vsk->sk, target, copied, data_read,
+                       (struct vmci_transport_recv_notify_data *)data);
+}
+
+static int vmci_transport_notify_send_init(
+       struct vsock_sock *vsk,
+       struct vsock_transport_send_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->send_init(
+                       &vsk->sk,
+                       (struct vmci_transport_send_notify_data *)data);
+}
+
+static int vmci_transport_notify_send_pre_block(
+       struct vsock_sock *vsk,
+       struct vsock_transport_send_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->send_pre_block(
+                       &vsk->sk,
+                       (struct vmci_transport_send_notify_data *)data);
+}
+
+static int vmci_transport_notify_send_pre_enqueue(
+       struct vsock_sock *vsk,
+       struct vsock_transport_send_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
+                       &vsk->sk,
+                       (struct vmci_transport_send_notify_data *)data);
+}
+
+static int vmci_transport_notify_send_post_enqueue(
+       struct vsock_sock *vsk,
+       ssize_t written,
+       struct vsock_transport_send_notify_data *data)
+{
+       return vmci_trans(vsk)->notify_ops->send_post_enqueue(
+                       &vsk->sk, written,
+                       (struct vmci_transport_send_notify_data *)data);
+}
+
+/* Report whether a protocol override is configured.  PROTOCOL_OVERRIDE is
+ * presumably a compile-time/module-level knob defined earlier in this file
+ * (not visible here); -1 means "no override".  When an override is active,
+ * *old_pkt_proto tells the caller whether the old packet format (value 0)
+ * was forced.
+ */
+static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
+{
+       if (PROTOCOL_OVERRIDE != -1) {
+               if (PROTOCOL_OVERRIDE == 0)
+                       *old_pkt_proto = true;
+               else
+                       *old_pkt_proto = false;
+
+               pr_info("Proto override in use\n");
+               return true;
+       }
+
+       return false;
+}
+
+/* Select the notification ops matching the negotiated protocol and run its
+ * socket_init hook.  old_pkt_proto picks the legacy pkt ops and is mutually
+ * exclusive with a new protocol version in *proto.  Returns false on an
+ * inconsistent or unknown protocol selection.
+ */
+static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
+                                                 u16 *proto,
+                                                 bool old_pkt_proto)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       if (old_pkt_proto) {
+               if (*proto != VSOCK_PROTO_INVALID) {
+                       pr_err("Can't set both an old and new protocol\n");
+                       return false;
+               }
+               vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
+               goto exit;
+       }
+
+       switch (*proto) {
+       case VSOCK_PROTO_PKT_ON_NOTIFY:
+               vmci_trans(vsk)->notify_ops =
+                       &vmci_transport_notify_pkt_q_state_ops;
+               break;
+       default:
+               pr_err("Unknown notify protocol version\n");
+               return false;
+       }
+
+exit:
+       vmci_trans(vsk)->notify_ops->socket_init(sk);
+       return true;
+}
+
+/* Bitmask of new-protocol versions to advertise in REQUEST2 packets; an
+ * active PROTOCOL_OVERRIDE value replaces the full supported set.
+ */
+static u16 vmci_transport_new_proto_supported_versions(void)
+{
+       if (PROTOCOL_OVERRIDE != -1)
+               return PROTOCOL_OVERRIDE;
+
+       return VSOCK_PROTO_ALL_SUPPORTED;
+}
+
+/* Our own VMCI context ID, used as the local CID for this transport. */
+static u32 vmci_transport_get_local_cid(void)
+{
+       return vmci_get_context_id();
+}
+
+/* Transport callback table registered with the vsock core in
+ * vmci_transport_init().
+ */
+static struct vsock_transport vmci_transport = {
+       .init = vmci_transport_socket_init,
+       .destruct = vmci_transport_destruct,
+       .release = vmci_transport_release,
+       .connect = vmci_transport_connect,
+       .dgram_bind = vmci_transport_dgram_bind,
+       .dgram_dequeue = vmci_transport_dgram_dequeue,
+       .dgram_enqueue = vmci_transport_dgram_enqueue,
+       .dgram_allow = vmci_transport_dgram_allow,
+       .stream_dequeue = vmci_transport_stream_dequeue,
+       .stream_enqueue = vmci_transport_stream_enqueue,
+       .stream_has_data = vmci_transport_stream_has_data,
+       .stream_has_space = vmci_transport_stream_has_space,
+       .stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
+       .stream_is_active = vmci_transport_stream_is_active,
+       .stream_allow = vmci_transport_stream_allow,
+       .notify_poll_in = vmci_transport_notify_poll_in,
+       .notify_poll_out = vmci_transport_notify_poll_out,
+       .notify_recv_init = vmci_transport_notify_recv_init,
+       .notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
+       .notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
+       .notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
+       .notify_send_init = vmci_transport_notify_send_init,
+       .notify_send_pre_block = vmci_transport_notify_send_pre_block,
+       .notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
+       .notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
+       .shutdown = vmci_transport_shutdown,
+       .set_buffer_size = vmci_transport_set_buffer_size,
+       .set_min_buffer_size = vmci_transport_set_min_buffer_size,
+       .set_max_buffer_size = vmci_transport_set_max_buffer_size,
+       .get_buffer_size = vmci_transport_get_buffer_size,
+       .get_min_buffer_size = vmci_transport_get_min_buffer_size,
+       .get_max_buffer_size = vmci_transport_get_max_buffer_size,
+       .get_local_cid = vmci_transport_get_local_cid,
+};
+
+/* Module init: create the control-channel datagram handle shared by all
+ * stream sockets, subscribe to queue pair resume events, then register
+ * this transport with the vsock core.  Unwinds in reverse order on failure.
+ */
+static int __init vmci_transport_init(void)
+{
+       int err;
+
+       /* Create the datagram handle that we will use to send and receive all
+        * VSocket control messages for this context.
+        */
+       err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
+                                                VMCI_FLAG_ANYCID_DG_HND,
+                                                vmci_transport_recv_stream_cb,
+                                                NULL,
+                                                &vmci_transport_stream_handle);
+       if (err < VMCI_SUCCESS) {
+               pr_err("Unable to create datagram handle. (%d)\n", err);
+               return vmci_transport_error_to_vsock_error(err);
+       }
+
+       err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
+                                  vmci_transport_qp_resumed_cb,
+                                  NULL, &vmci_transport_qp_resumed_sub_id);
+       if (err < VMCI_SUCCESS) {
+               pr_err("Unable to subscribe to resumed event. (%d)\n", err);
+               err = vmci_transport_error_to_vsock_error(err);
+               vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
+               goto err_destroy_stream_handle;
+       }
+
+       err = vsock_core_init(&vmci_transport);
+       if (err < 0)
+               goto err_unsubscribe;
+
+       return 0;
+
+err_unsubscribe:
+       vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
+err_destroy_stream_handle:
+       vmci_datagram_destroy_handle(vmci_transport_stream_handle);
+       return err;
+}
+module_init(vmci_transport_init);
+
+/* Module exit: destroy the control-channel handle, drop the QP-resumed
+ * subscription, then deregister from the vsock core.
+ * NOTE(review): this is NOT the reverse of vmci_transport_init() - the
+ * control handle is destroyed before vsock_core_exit() runs; confirm no
+ * in-flight sockets can still use it at this point.
+ */
+static void __exit vmci_transport_exit(void)
+{
+       if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
+               if (vmci_datagram_destroy_handle(
+                       vmci_transport_stream_handle) != VMCI_SUCCESS)
+                       pr_err("Couldn't destroy datagram handle\n");
+               vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
+       }
+
+       if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
+               vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
+               vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
+       }
+
+       vsock_core_exit();
+}
+module_exit(vmci_transport_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("vmware_vsock");
+MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
new file mode 100644 (file)
index 0000000..1bf9918
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _VMCI_TRANSPORT_H_
+#define _VMCI_TRANSPORT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vsock_addr.h"
+#include "af_vsock.h"
+
+/* If the packet format changes in a release then this should change too. */
+#define VMCI_TRANSPORT_PACKET_VERSION 1
+
+/* The resource ID on which control packets are sent. */
+#define VMCI_TRANSPORT_PACKET_RID 1
+
+#define VSOCK_PROTO_INVALID        0
+#define VSOCK_PROTO_PKT_ON_NOTIFY (1 << 0)
+#define VSOCK_PROTO_ALL_SUPPORTED (VSOCK_PROTO_PKT_ON_NOTIFY)
+
+#define vmci_trans(_vsk) ((struct vmci_transport *)((_vsk)->trans))
+
+/* Control packet types carried in vmci_transport_packet.type.  REQUEST2 and
+ * NEGOTIATE2 are the new-protocol variants that additionally carry the set
+ * of supported notify protocol versions (see the REQUEST2/NEGOTIATE2 paths
+ * in vmci_transport.c).
+ */
+enum vmci_transport_packet_type {
+       VMCI_TRANSPORT_PACKET_TYPE_INVALID = 0,
+       VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
+       VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
+       VMCI_TRANSPORT_PACKET_TYPE_OFFER,
+       VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
+       VMCI_TRANSPORT_PACKET_TYPE_WROTE,
+       VMCI_TRANSPORT_PACKET_TYPE_READ,
+       VMCI_TRANSPORT_PACKET_TYPE_RST,
+       VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
+       VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
+       VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
+       VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
+       VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
+       VMCI_TRANSPORT_PACKET_TYPE_MAX
+};
+
+struct vmci_transport_waiting_info {
+       u64 generation;
+       u64 offset;
+};
+
+/* Control packet type for STREAM sockets.  DGRAMs have no control packets nor
+ * special packet header for data packets, they are just raw VMCI DGRAM
+ * messages.  For STREAMs, control packets are sent over the control channel
+ * while data is written and read directly from queue pairs with no packet
+ * format.
+ */
+struct vmci_transport_packet {
+       struct vmci_datagram dg;
+       u8 version;
+       u8 type;
+       u16 proto;
+       u32 src_port;
+       u32 dst_port;
+       u32 _reserved2;
+       union {
+               u64 size;
+               u64 mode;
+               struct vmci_handle handle;
+               struct vmci_transport_waiting_info wait;
+       } u;
+};
+
+struct vmci_transport_notify_pkt {
+       u64 write_notify_window;
+       u64 write_notify_min_window;
+       bool peer_waiting_read;
+       bool peer_waiting_write;
+       bool peer_waiting_write_detected;
+       bool sent_waiting_read;
+       bool sent_waiting_write;
+       struct vmci_transport_waiting_info peer_waiting_read_info;
+       struct vmci_transport_waiting_info peer_waiting_write_info;
+       u64 produce_q_generation;
+       u64 consume_q_generation;
+};
+
+struct vmci_transport_notify_pkt_q_state {
+       u64 write_notify_window;
+       u64 write_notify_min_window;
+       bool peer_waiting_write;
+       bool peer_waiting_write_detected;
+};
+
+union vmci_transport_notify {
+       struct vmci_transport_notify_pkt pkt;
+       struct vmci_transport_notify_pkt_q_state pkt_q_state;
+};
+
+/* Our transport-specific data. */
+struct vmci_transport {
+       /* For DGRAMs. */
+       struct vmci_handle dg_handle;
+       /* For STREAMs. */
+       struct vmci_handle qp_handle;
+       struct vmci_qp *qpair;
+       u64 produce_size;
+       u64 consume_size;
+       u64 queue_pair_size;
+       u64 queue_pair_min_size;
+       u64 queue_pair_max_size;
+       u32 attach_sub_id;
+       u32 detach_sub_id;
+       union vmci_transport_notify notify;
+       struct vmci_transport_notify_ops *notify_ops;
+};
+
+int vmci_transport_register(void);
+void vmci_transport_unregister(void);
+
+int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
+                                struct sockaddr_vm *src);
+int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
+                               struct sockaddr_vm *src);
+int vmci_transport_send_wrote(struct sock *sk);
+int vmci_transport_send_read(struct sock *sk);
+int vmci_transport_send_waiting_write(struct sock *sk,
+                                     struct vmci_transport_waiting_info *wait);
+int vmci_transport_send_waiting_read(struct sock *sk,
+                                    struct vmci_transport_waiting_info *wait);
+
+#endif
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
new file mode 100644 (file)
index 0000000..9a73074
--- /dev/null
@@ -0,0 +1,680 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/stddef.h>
+#include <net/sock.h>
+
+#include "vmci_transport_notify.h"
+
+#define PKT_FIELD(vsk, field_name) (vmci_trans(vsk)->notify.pkt.field_name)
+
+/* Decide whether the peer (which previously signalled WAITING_WRITE) should
+ * now be sent a READ notification.  Returns true when the consume queue's
+ * free space exceeds the current notify limit.  With the waiting-notify
+ * optimization compiled out this unconditionally returns true.
+ */
+static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       bool retval;
+       u64 notify_limit;
+
+       if (!PKT_FIELD(vsk, peer_waiting_write))
+               return false;
+
+#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
+       /* When the sender blocks, we take that as a sign that the sender is
+        * faster than the receiver. To reduce the transmit rate of the sender,
+        * we delay the sending of the read notification by decreasing the
+        * write_notify_window. The notification is delayed until the number of
+        * bytes used in the queue drops below the write_notify_window.
+        */
+
+       if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
+               /* Shrink the window only once per wait episode; the flag is
+                * reset below after we decide to notify.
+                */
+               PKT_FIELD(vsk, peer_waiting_write_detected) = true;
+               if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
+                       PKT_FIELD(vsk, write_notify_window) =
+                           PKT_FIELD(vsk, write_notify_min_window);
+               } else {
+                       PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
+                       if (PKT_FIELD(vsk, write_notify_window) <
+                           PKT_FIELD(vsk, write_notify_min_window))
+                               PKT_FIELD(vsk, write_notify_window) =
+                                   PKT_FIELD(vsk, write_notify_min_window);
+
+               }
+       }
+       notify_limit = vmci_trans(vsk)->consume_size -
+               PKT_FIELD(vsk, write_notify_window);
+#else
+       notify_limit = 0;
+#endif
+
+       /* For now we ignore the wait information and just see if the free
+        * space exceeds the notify limit.  Note that improving this function
+        * to be more intelligent will not require a protocol change and will
+        * retain compatibility between endpoints with mixed versions of this
+        * function.
+        *
+        * The notify_limit is used to delay notifications in the case where
+        * flow control is enabled. Below the test is expressed in terms of
+        * free space in the queue: if free_space > ConsumeSize -
+        * write_notify_window then notify An alternate way of expressing this
+        * is to rewrite the expression to use the data ready in the receive
+        * queue: if write_notify_window > bufferReady then notify as
+        * free_space == ConsumeSize - bufferReady.
+        */
+       retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
+               notify_limit;
+#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
+       if (retval) {
+               /*
+                * Once we notify the peer, we reset the detected flag so the
+                * next wait will again cause a decrease in the window size.
+                */
+
+               PKT_FIELD(vsk, peer_waiting_write_detected) = false;
+       }
+#endif
+       return retval;
+#else
+       return true;
+#endif
+}
+
+/* Decide whether the peer (which previously signalled WAITING_READ) should
+ * now be sent a WROTE notification, i.e. whether our produce queue has any
+ * data ready for it.  With the optimization compiled out, always true.
+ */
+static bool vmci_transport_notify_waiting_read(struct vsock_sock *vsk)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       if (!PKT_FIELD(vsk, peer_waiting_read))
+               return false;
+
+       /* For now we ignore the wait information and just see if there is any
+        * data for our peer to read.  Note that improving this function to be
+        * more intelligent will not require a protocol change and will retain
+        * compatibility between endpoints with mixed versions of this
+        * function.
+        */
+       return vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) > 0;
+#else
+       return true;
+#endif
+}
+
+/* Handle an incoming WAITING_READ control packet: record the peer's wait
+ * info and, if we already have data ready, immediately answer with a WROTE
+ * notification (choosing the _bh sender when called from bottom-half
+ * context).  The peer_waiting_read flag is cleared only if the send
+ * succeeded, so a later write can retry the notification.
+ */
+static void
+vmci_transport_handle_waiting_read(struct sock *sk,
+                                  struct vmci_transport_packet *pkt,
+                                  bool bottom_half,
+                                  struct sockaddr_vm *dst,
+                                  struct sockaddr_vm *src)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       struct vsock_sock *vsk;
+
+       vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, peer_waiting_read) = true;
+       memcpy(&PKT_FIELD(vsk, peer_waiting_read_info), &pkt->u.wait,
+              sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
+
+       if (vmci_transport_notify_waiting_read(vsk)) {
+               bool sent;
+
+               if (bottom_half)
+                       sent = vmci_transport_send_wrote_bh(dst, src) > 0;
+               else
+                       sent = vmci_transport_send_wrote(sk) > 0;
+
+               if (sent)
+                       PKT_FIELD(vsk, peer_waiting_read) = false;
+       }
+#endif
+}
+
+/* Handle an incoming WAITING_WRITE control packet: record the peer's wait
+ * info and, if enough space has already been freed, immediately answer with
+ * a READ notification (mirror image of vmci_transport_handle_waiting_read).
+ */
+static void
+vmci_transport_handle_waiting_write(struct sock *sk,
+                                   struct vmci_transport_packet *pkt,
+                                   bool bottom_half,
+                                   struct sockaddr_vm *dst,
+                                   struct sockaddr_vm *src)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       struct vsock_sock *vsk;
+
+       vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, peer_waiting_write) = true;
+       memcpy(&PKT_FIELD(vsk, peer_waiting_write_info), &pkt->u.wait,
+              sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
+
+       if (vmci_transport_notify_waiting_write(vsk)) {
+               bool sent;
+
+               if (bottom_half)
+                       sent = vmci_transport_send_read_bh(dst, src) > 0;
+               else
+                       sent = vmci_transport_send_read(sk) > 0;
+
+               if (sent)
+                       PKT_FIELD(vsk, peer_waiting_write) = false;
+       }
+#endif
+}
+
+/* Handle an incoming READ control packet: the peer consumed data, so our
+ * earlier WAITING_WRITE (if any) is satisfied; wake writers via
+ * sk_write_space().
+ */
+static void
+vmci_transport_handle_read(struct sock *sk,
+                          struct vmci_transport_packet *pkt,
+                          bool bottom_half,
+                          struct sockaddr_vm *dst, struct sockaddr_vm *src)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       struct vsock_sock *vsk;
+
+       vsk = vsock_sk(sk);
+       /* Allow a fresh WAITING_WRITE to be sent next time we block. */
+       PKT_FIELD(vsk, sent_waiting_write) = false;
+#endif
+
+       sk->sk_write_space(sk);
+}
+
+/* Tell the peer we are blocked waiting for room_needed bytes to read.
+ * Computes the consume-queue position (offset + generation, accounting for
+ * wrap-around) at which the data would become available, sends a
+ * WAITING_READ packet, and remembers that it was sent so it is not
+ * re-sent until a WROTE arrives.  Returns false only if the send failed.
+ */
+static bool send_waiting_read(struct sock *sk, u64 room_needed)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       struct vsock_sock *vsk;
+       struct vmci_transport_waiting_info waiting_info;
+       u64 tail;
+       u64 head;
+       u64 room_left;
+       bool ret;
+
+       vsk = vsock_sk(sk);
+
+       if (PKT_FIELD(vsk, sent_waiting_read))
+               return true;
+
+       /* Grow the write-notify window by a page (up to consume_size) each
+        * time we block: the opposite of the shrink done when the peer
+        * blocks, so the window adapts to the traffic pattern.
+        */
+       if (PKT_FIELD(vsk, write_notify_window) <
+                       vmci_trans(vsk)->consume_size)
+               PKT_FIELD(vsk, write_notify_window) =
+                   min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
+                       vmci_trans(vsk)->consume_size);
+
+       vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head);
+       room_left = vmci_trans(vsk)->consume_size - head;
+       if (room_needed >= room_left) {
+               /* The wait target wraps past the end of the queue, landing
+                * in the next generation.
+                */
+               waiting_info.offset = room_needed - room_left;
+               waiting_info.generation =
+                   PKT_FIELD(vsk, consume_q_generation) + 1;
+       } else {
+               waiting_info.offset = head + room_needed;
+               waiting_info.generation = PKT_FIELD(vsk, consume_q_generation);
+       }
+
+       ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0;
+       if (ret)
+               PKT_FIELD(vsk, sent_waiting_read) = true;
+
+       return ret;
+#else
+       return true;
+#endif
+}
+
+/* Tell the peer we are blocked waiting for room_needed bytes of space to
+ * write.  Computes the produce-queue position (offset + generation) at
+ * which enough space would exist, sends a WAITING_WRITE packet, and
+ * remembers that it was sent until a READ arrives.  Returns false only if
+ * the send failed.
+ */
+static bool send_waiting_write(struct sock *sk, u64 room_needed)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       struct vsock_sock *vsk;
+       struct vmci_transport_waiting_info waiting_info;
+       u64 tail;
+       u64 head;
+       u64 room_left;
+       bool ret;
+
+       vsk = vsock_sk(sk);
+
+       if (PKT_FIELD(vsk, sent_waiting_write))
+               return true;
+
+       vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head);
+       room_left = vmci_trans(vsk)->produce_size - tail;
+       if (room_needed + 1 >= room_left) {
+               /* Wraps around to current generation. */
+               waiting_info.offset = room_needed + 1 - room_left;
+               waiting_info.generation = PKT_FIELD(vsk, produce_q_generation);
+       } else {
+               waiting_info.offset = tail + room_needed + 1;
+               waiting_info.generation =
+                   PKT_FIELD(vsk, produce_q_generation) - 1;
+       }
+
+       ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0;
+       if (ret)
+               PKT_FIELD(vsk, sent_waiting_write) = true;
+
+       return ret;
+#else
+       return true;
+#endif
+}
+
+static int vmci_transport_send_read_notification(struct sock *sk)
+{
+       struct vsock_sock *vsk;
+       bool sent_read;
+       unsigned int retries;
+       int err;
+
+       vsk = vsock_sk(sk);
+       sent_read = false;
+       retries = 0;
+       err = 0;
+
+       if (vmci_transport_notify_waiting_write(vsk)) {
+               /* Notify the peer that we have read, retrying the send on
+                * failure up to our maximum value.  XXX For now we just log
+                * the failure, but later we should schedule a work item to
+                * handle the resend until it succeeds.  That would require
+                * keeping track of work items in the vsk and cleaning them up
+                * upon socket close.
+                */
+               while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
+                      !sent_read &&
+                      retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
+                       err = vmci_transport_send_read(sk);
+                       if (err >= 0)
+                               sent_read = true;
+
+                       retries++;
+               }
+
+               if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS)
+                       pr_err("%p unable to send read notify to peer\n", sk);
+               else
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+                       PKT_FIELD(vsk, peer_waiting_write) = false;
+#endif
+
+       }
+       return err;
+}
+
+/* Handle an incoming WROTE control packet: the peer produced data, so our
+ * earlier WAITING_READ (if any) is satisfied; wake readers via
+ * sk_data_ready().
+ */
+static void
+vmci_transport_handle_wrote(struct sock *sk,
+                           struct vmci_transport_packet *pkt,
+                           bool bottom_half,
+                           struct sockaddr_vm *dst, struct sockaddr_vm *src)
+{
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       struct vsock_sock *vsk = vsock_sk(sk);
+       /* Allow a fresh WAITING_READ to be sent next time we block. */
+       PKT_FIELD(vsk, sent_waiting_read) = false;
+#endif
+       sk->sk_data_ready(sk, 0);
+}
+
+/* notify_ops->socket_init: reset all notification state for a new socket.
+ * The notify windows start at one page and are later widened to the
+ * negotiated consume size in process_request/process_negotiate.
+ */
+static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
+       PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
+       PKT_FIELD(vsk, peer_waiting_read) = false;
+       PKT_FIELD(vsk, peer_waiting_write) = false;
+       PKT_FIELD(vsk, peer_waiting_write_detected) = false;
+       PKT_FIELD(vsk, sent_waiting_read) = false;
+       PKT_FIELD(vsk, sent_waiting_write) = false;
+       PKT_FIELD(vsk, produce_q_generation) = 0;
+       PKT_FIELD(vsk, consume_q_generation) = 0;
+
+       memset(&PKT_FIELD(vsk, peer_waiting_read_info), 0,
+              sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
+       memset(&PKT_FIELD(vsk, peer_waiting_write_info), 0,
+              sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
+}
+
+/* notify_ops->socket_destruct: nothing to release — all notification state
+ * is embedded in struct vmci_transport and freed with it.
+ */
+static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
+{
+}
+
+/* notify_ops->poll_in: report via *data_ready_now whether data can be read.
+ * If not, and the socket is connected, ask the peer (WAITING_READ for one
+ * byte) to notify us when data arrives.  Returns -1 if that notification
+ * could not be sent, 0 otherwise.
+ */
+static int
+vmci_transport_notify_pkt_poll_in(struct sock *sk,
+                                 size_t target, bool *data_ready_now)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       if (vsock_stream_has_data(vsk)) {
+               *data_ready_now = true;
+       } else {
+               /* We can't read right now because there is nothing in the
+                * queue. Ask for notifications when there is something to
+                * read.
+                */
+               if (sk->sk_state == SS_CONNECTED) {
+                       if (!send_waiting_read(sk, 1))
+                               return -1;
+
+               }
+               *data_ready_now = false;
+       }
+
+       return 0;
+}
+
+/* notify_ops->poll_out: report via *space_avail_now whether data can be
+ * written.  When the queue is completely full, ask the peer (WAITING_WRITE
+ * for one byte) to notify us when space frees up.  Returns -1 if that
+ * notification could not be sent, 0 otherwise.
+ */
+static int
+vmci_transport_notify_pkt_poll_out(struct sock *sk,
+                                  size_t target, bool *space_avail_now)
+{
+       s64 produce_q_free_space;
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       produce_q_free_space = vsock_stream_has_space(vsk);
+       if (produce_q_free_space > 0) {
+               *space_avail_now = true;
+               return 0;
+       } else if (produce_q_free_space == 0) {
+               /* This is a connected socket but we can't currently send data.
+                * Notify the peer that we are waiting if the queue is full. We
+                * only send a waiting write if the queue is full because
+                * otherwise we end up in an infinite WAITING_WRITE, READ,
+                * WAITING_WRITE, READ, etc. loop. Treat failing to send the
+                * notification as a socket error, passing that back through
+                * the mask.
+                */
+               if (!send_waiting_write(sk, 1))
+                       return -1;
+
+               *space_avail_now = false;
+       }
+
+       return 0;
+}
+
+/* notify_ops->recv_init: prepare per-receive notification bookkeeping.
+ * Raises write_notify_min_window to cover the receive target, and flags
+ * (via data->notify_on_block) that a read notification must be sent before
+ * blocking if the window had to grow past its previous value.
+ * Always returns 0.
+ */
+static int
+vmci_transport_notify_pkt_recv_init(
+                       struct sock *sk,
+                       size_t target,
+                       struct vmci_transport_recv_notify_data *data)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
+       data->consume_head = 0;
+       data->produce_tail = 0;
+#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
+       data->notify_on_block = false;
+
+       if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
+               PKT_FIELD(vsk, write_notify_min_window) = target + 1;
+               if (PKT_FIELD(vsk, write_notify_window) <
+                   PKT_FIELD(vsk, write_notify_min_window)) {
+                       /* If the current window is smaller than the new
+                        * minimal window size, we need to reevaluate whether
+                        * we need to notify the sender. If the number of ready
+                        * bytes are smaller than the new window, we need to
+                        * send a notification to the sender before we block.
+                        */
+
+                       PKT_FIELD(vsk, write_notify_window) =
+                           PKT_FIELD(vsk, write_notify_min_window);
+                       data->notify_on_block = true;
+               }
+       }
+#endif
+#endif
+
+       return 0;
+}
+
+/* notify_ops->recv_pre_block: called just before the receiver sleeps.
+ * Sends WAITING_READ for the receive target (failure maps to
+ * -EHOSTUNREACH) and, if recv_init requested it, flushes a pending read
+ * notification so the sender is not left blocked while we sleep.
+ */
+static int
+vmci_transport_notify_pkt_recv_pre_block(
+                               struct sock *sk,
+                               size_t target,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       int err = 0;
+
+       /* Notify our peer that we are waiting for data to read. */
+       if (!send_waiting_read(sk, target)) {
+               err = -EHOSTUNREACH;
+               return err;
+       }
+#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
+       if (data->notify_on_block) {
+               err = vmci_transport_send_read_notification(sk);
+               if (err < 0)
+                       return err;
+
+               data->notify_on_block = false;
+       }
+#endif
+
+       return err;
+}
+
+/* notify_ops->recv_pre_dequeue: snapshot the consume-queue indexes before
+ * dequeuing, so recv_post_dequeue can detect a wrap-around and bump the
+ * consume queue generation.  Always returns 0.
+ */
+static int
+vmci_transport_notify_pkt_recv_pre_dequeue(
+                               struct sock *sk,
+                               size_t target,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       /* Now consume up to len bytes from the queue.  Note that since we have
+        * the socket locked we should copy at least ready bytes.
+        */
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair,
+                                      &data->produce_tail,
+                                      &data->consume_head);
+#endif
+
+       return 0;
+}
+
+/* notify_ops->recv_post_dequeue: after data was read, maintain the consume
+ * queue generation (wrap detection against the pre-dequeue snapshot) and
+ * send a READ notification to a peer blocked on queue space.  Returns the
+ * notification result, 0 if no data was read.
+ */
+static int
+vmci_transport_notify_pkt_recv_post_dequeue(
+                               struct sock *sk,
+                               size_t target,
+                               ssize_t copied,
+                               bool data_read,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       struct vsock_sock *vsk;
+       int err;
+
+       vsk = vsock_sk(sk);
+       err = 0;
+
+       if (data_read) {
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+               /* Detect a wrap-around to maintain queue generation.  Note
+                * that this is safe since we hold the socket lock across the
+                * two queue pair operations.
+                */
+               if (copied >=
+                       vmci_trans(vsk)->consume_size - data->consume_head)
+                       PKT_FIELD(vsk, consume_q_generation)++;
+#endif
+
+               err = vmci_transport_send_read_notification(sk);
+               if (err < 0)
+                       return err;
+
+       }
+       return err;
+}
+
+/* notify_ops->send_init: zero the per-send index snapshot.  Always 0. */
+static int
+vmci_transport_notify_pkt_send_init(
+                       struct sock *sk,
+                       struct vmci_transport_send_notify_data *data)
+{
+#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
+       data->consume_head = 0;
+       data->produce_tail = 0;
+#endif
+
+       return 0;
+}
+
+/* notify_ops->send_pre_block: called just before the sender sleeps; tell
+ * the peer we are waiting for one byte of space.  Failure maps to
+ * -EHOSTUNREACH.
+ */
+static int
+vmci_transport_notify_pkt_send_pre_block(
+                               struct sock *sk,
+                               struct vmci_transport_send_notify_data *data)
+{
+       /* Notify our peer that we are waiting for room to write. */
+       if (!send_waiting_write(sk, 1))
+               return -EHOSTUNREACH;
+
+       return 0;
+}
+
+/* notify_ops->send_pre_enqueue: snapshot the produce-queue indexes before
+ * enqueuing, so send_post_enqueue can detect a wrap-around and bump the
+ * produce queue generation.  Always returns 0.
+ */
+static int
+vmci_transport_notify_pkt_send_pre_enqueue(
+                               struct sock *sk,
+                               struct vmci_transport_send_notify_data *data)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair,
+                                      &data->produce_tail,
+                                      &data->consume_head);
+#endif
+
+       return 0;
+}
+
+/* notify_ops->send_post_enqueue: after data was written, maintain the
+ * produce queue generation (wrap detection) and, if the peer signalled
+ * WAITING_READ, send it a WROTE notification with up to
+ * VMCI_TRANSPORT_MAX_DGRAM_RESENDS retries.  On success the
+ * peer_waiting_read flag is cleared; returns the last send result.
+ */
+static int
+vmci_transport_notify_pkt_send_post_enqueue(
+                               struct sock *sk,
+                               ssize_t written,
+                               struct vmci_transport_send_notify_data *data)
+{
+       int err = 0;
+       struct vsock_sock *vsk;
+       bool sent_wrote = false;
+       int retries = 0;
+
+       vsk = vsock_sk(sk);
+
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+       /* Detect a wrap-around to maintain queue generation.  Note that this
+        * is safe since we hold the socket lock across the two queue pair
+        * operations.
+        */
+       if (written >= vmci_trans(vsk)->produce_size - data->produce_tail)
+               PKT_FIELD(vsk, produce_q_generation)++;
+
+#endif
+
+       if (vmci_transport_notify_waiting_read(vsk)) {
+               /* Notify the peer that we have written, retrying the send on
+                * failure up to our maximum value. See the XXX comment for the
+                * corresponding piece of code in StreamRecvmsg() for potential
+                * improvements.
+                */
+               while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
+                      !sent_wrote &&
+                      retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
+                       err = vmci_transport_send_wrote(sk);
+                       if (err >= 0)
+                               sent_wrote = true;
+
+                       retries++;
+               }
+
+               if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
+                       pr_err("%p unable to send wrote notify to peer\n", sk);
+                       return err;
+               } else {
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+                       PKT_FIELD(vsk, peer_waiting_read) = false;
+#endif
+               }
+       }
+       return err;
+}
+
+/* notify_ops->handle_notify_pkt: dispatch an incoming control packet to the
+ * matching handler.  Only WROTE, READ, WAITING_WRITE and WAITING_READ are
+ * consumed here; *pkt_processed (if non-NULL) reports whether the packet
+ * was one of those, so the caller can process the rest.
+ */
+static void
+vmci_transport_notify_pkt_handle_pkt(
+                       struct sock *sk,
+                       struct vmci_transport_packet *pkt,
+                       bool bottom_half,
+                       struct sockaddr_vm *dst,
+                       struct sockaddr_vm *src, bool *pkt_processed)
+{
+       bool processed = false;
+
+       switch (pkt->type) {
+       case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
+               vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
+               processed = true;
+               break;
+       case VMCI_TRANSPORT_PACKET_TYPE_READ:
+               vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
+               processed = true;
+               break;
+       case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
+               vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
+                                                   dst, src);
+               processed = true;
+               break;
+
+       case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
+               vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
+                                                  dst, src);
+               processed = true;
+               break;
+       default:
+               /* Not a notification packet; explicitly leave
+                * processed == false for the caller.
+                */
+               break;
+       }
+
+       if (pkt_processed)
+               *pkt_processed = processed;
+}
+
+/* notify_ops->process_request: once the queue pair size is known, widen the
+ * write-notify window to the full consume size and clamp the minimum window
+ * down to it if the queue is smaller.
+ */
+static void vmci_transport_notify_pkt_process_request(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
+       if (vmci_trans(vsk)->consume_size <
+               PKT_FIELD(vsk, write_notify_min_window))
+               PKT_FIELD(vsk, write_notify_min_window) =
+                       vmci_trans(vsk)->consume_size;
+}
+
+/* notify_ops->process_negotiate: identical body to process_request — both
+ * sides size the notify window from the negotiated consume size.
+ */
+static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
+       if (vmci_trans(vsk)->consume_size <
+               PKT_FIELD(vsk, write_notify_min_window))
+               PKT_FIELD(vsk, write_notify_min_window) =
+                       vmci_trans(vsk)->consume_size;
+}
+
+/* Socket control packet based operations.  Designated initializers keep
+ * each callback bound to the right slot even if fields are added to or
+ * reordered in struct vmci_transport_notify_ops.
+ */
+struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
+       .socket_init       = vmci_transport_notify_pkt_socket_init,
+       .socket_destruct   = vmci_transport_notify_pkt_socket_destruct,
+       .poll_in           = vmci_transport_notify_pkt_poll_in,
+       .poll_out          = vmci_transport_notify_pkt_poll_out,
+       .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+       .recv_init         = vmci_transport_notify_pkt_recv_init,
+       .recv_pre_block    = vmci_transport_notify_pkt_recv_pre_block,
+       .recv_pre_dequeue  = vmci_transport_notify_pkt_recv_pre_dequeue,
+       .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+       .send_init         = vmci_transport_notify_pkt_send_init,
+       .send_pre_block    = vmci_transport_notify_pkt_send_pre_block,
+       .send_pre_enqueue  = vmci_transport_notify_pkt_send_pre_enqueue,
+       .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+       .process_request   = vmci_transport_notify_pkt_process_request,
+       .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
+};
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
new file mode 100644 (file)
index 0000000..7df7932
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMCI_TRANSPORT_NOTIFY_H__
+#define __VMCI_TRANSPORT_NOTIFY_H__
+
+#include <linux/types.h>
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/vm_sockets.h>
+
+#include "vmci_transport.h"
+
+/* Comment this out to compare with old protocol. */
+#define VSOCK_OPTIMIZATION_WAITING_NOTIFY 1
+#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
+/* Comment this out to remove flow control for "new" protocol */
+#define VSOCK_OPTIMIZATION_FLOW_CONTROL 1
+#endif
+
+#define VMCI_TRANSPORT_MAX_DGRAM_RESENDS       10
+
+/* Scratch state threaded through one receive operation (recv_init ->
+ * recv_pre_block/recv_pre_dequeue -> recv_post_dequeue).
+ */
+struct vmci_transport_recv_notify_data {
+       u64 consume_head;       /* consume-queue snapshot for wrap detection */
+       u64 produce_tail;
+       bool notify_on_block;   /* send read notification before sleeping */
+};
+
+/* Scratch state threaded through one send operation (send_init ->
+ * send_pre_enqueue -> send_post_enqueue).
+ */
+struct vmci_transport_send_notify_data {
+       u64 consume_head;
+       u64 produce_tail;       /* produce-queue snapshot for wrap detection */
+};
+
+/* Socket notification callbacks.  Two implementations exist (packet based
+ * and queue-pair-state based); a socket's vmci_transport holds a pointer to
+ * the ops in effect for it.
+ */
+struct vmci_transport_notify_ops {
+       /* Socket lifetime. */
+       void (*socket_init) (struct sock *sk);
+       void (*socket_destruct) (struct vsock_sock *vsk);
+       /* Poll support; may return negative to signal a socket error. */
+       int (*poll_in) (struct sock *sk, size_t target,
+                         bool *data_ready_now);
+       int (*poll_out) (struct sock *sk, size_t target,
+                          bool *space_avail_now);
+       /* Incoming control packet dispatch. */
+       void (*handle_notify_pkt) (struct sock *sk,
+                                  struct vmci_transport_packet *pkt,
+                                  bool bottom_half, struct sockaddr_vm *dst,
+                                  struct sockaddr_vm *src,
+                                  bool *pkt_processed);
+       /* Hooks around a stream receive operation. */
+       int (*recv_init) (struct sock *sk, size_t target,
+                         struct vmci_transport_recv_notify_data *data);
+       int (*recv_pre_block) (struct sock *sk, size_t target,
+                              struct vmci_transport_recv_notify_data *data);
+       int (*recv_pre_dequeue) (struct sock *sk, size_t target,
+                                struct vmci_transport_recv_notify_data *data);
+       int (*recv_post_dequeue) (struct sock *sk, size_t target,
+                                 ssize_t copied, bool data_read,
+                                 struct vmci_transport_recv_notify_data *data);
+       /* Hooks around a stream send operation. */
+       int (*send_init) (struct sock *sk,
+                         struct vmci_transport_send_notify_data *data);
+       int (*send_pre_block) (struct sock *sk,
+                              struct vmci_transport_send_notify_data *data);
+       int (*send_pre_enqueue) (struct sock *sk,
+                                struct vmci_transport_send_notify_data *data);
+       int (*send_post_enqueue) (struct sock *sk, ssize_t written,
+                                 struct vmci_transport_send_notify_data *data);
+       /* Connection establishment (queue pair sizing). */
+       void (*process_request) (struct sock *sk);
+       void (*process_negotiate) (struct sock *sk);
+};
+
+extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
+extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;
+
+#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
new file mode 100644 (file)
index 0000000..622bd7a
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/stddef.h>
+#include <net/sock.h>
+
+#include "vmci_transport_notify.h"
+
+#define PKT_FIELD(vsk, field_name) \
+       (vmci_trans(vsk)->notify.pkt_q_state.field_name)
+
+static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
+{
+       bool retval;
+       u64 notify_limit;
+
+       if (!PKT_FIELD(vsk, peer_waiting_write))
+               return false;
+
+       /* When the sender blocks, we take that as a sign that the sender is
+        * faster than the receiver. To reduce the transmit rate of the sender,
+        * we delay the sending of the read notification by decreasing the
+        * write_notify_window. The notification is delayed until the number of
+        * bytes used in the queue drops below the write_notify_window.
+        */
+
+       if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
+               PKT_FIELD(vsk, peer_waiting_write_detected) = true;
+               if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
+                       PKT_FIELD(vsk, write_notify_window) =
+                           PKT_FIELD(vsk, write_notify_min_window);
+               } else {
+                       PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
+                       if (PKT_FIELD(vsk, write_notify_window) <
+                           PKT_FIELD(vsk, write_notify_min_window))
+                               PKT_FIELD(vsk, write_notify_window) =
+                                   PKT_FIELD(vsk, write_notify_min_window);
+
+               }
+       }
+       notify_limit = vmci_trans(vsk)->consume_size -
+               PKT_FIELD(vsk, write_notify_window);
+
+       /* The notify_limit is used to delay notifications in the case where
+        * flow control is enabled. Below the test is expressed in terms of
+        * free space in the queue: if free_space > ConsumeSize -
+        * write_notify_window then notify. An alternate way of expressing this
+        * is to rewrite the expression to use the data ready in the receive
+        * queue: if write_notify_window > bufferReady then notify as
+        * free_space == ConsumeSize - bufferReady.
+        */
+
+       retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
+               notify_limit;
+
+       if (retval) {
+               /* Once we notify the peer, we reset the detected flag so the
+                * next wait will again cause a decrease in the window size.
+                */
+
+               PKT_FIELD(vsk, peer_waiting_write_detected) = false;
+       }
+       return retval;
+}
+
+static void
+vmci_transport_handle_read(struct sock *sk,
+                          struct vmci_transport_packet *pkt,
+                          bool bottom_half,
+                          struct sockaddr_vm *dst, struct sockaddr_vm *src)
+{
+       sk->sk_write_space(sk);
+}
+
+static void
+vmci_transport_handle_wrote(struct sock *sk,
+                           struct vmci_transport_packet *pkt,
+                           bool bottom_half,
+                           struct sockaddr_vm *dst, struct sockaddr_vm *src)
+{
+       sk->sk_data_ready(sk, 0);
+}
+
+static void vsock_block_update_write_window(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size)
+               PKT_FIELD(vsk, write_notify_window) =
+                   min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
+                       vmci_trans(vsk)->consume_size);
+}
+
+static int vmci_transport_send_read_notification(struct sock *sk)
+{
+       struct vsock_sock *vsk;
+       bool sent_read;
+       unsigned int retries;
+       int err;
+
+       vsk = vsock_sk(sk);
+       sent_read = false;
+       retries = 0;
+       err = 0;
+
+       if (vmci_transport_notify_waiting_write(vsk)) {
+               /* Notify the peer that we have read, retrying the send on
+                * failure up to our maximum value.  XXX For now we just log
+                * the failure, but later we should schedule a work item to
+                * handle the resend until it succeeds.  That would require
+                * keeping track of work items in the vsk and cleaning them up
+                * upon socket close.
+                */
+               while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
+                      !sent_read &&
+                      retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
+                       err = vmci_transport_send_read(sk);
+                       if (err >= 0)
+                               sent_read = true;
+
+                       retries++;
+               }
+
+               if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
+                       pr_err("%p unable to send read notification to peer\n",
+                              sk);
+               else
+                       PKT_FIELD(vsk, peer_waiting_write) = false;
+
+       }
+       return err;
+}
+
+static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
+       PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
+       PKT_FIELD(vsk, peer_waiting_write) = false;
+       PKT_FIELD(vsk, peer_waiting_write_detected) = false;
+}
+
+static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
+{
+       PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
+       PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
+       PKT_FIELD(vsk, peer_waiting_write) = false;
+       PKT_FIELD(vsk, peer_waiting_write_detected) = false;
+}
+
+static int
+vmci_transport_notify_pkt_poll_in(struct sock *sk,
+                                 size_t target, bool *data_ready_now)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       if (vsock_stream_has_data(vsk)) {
+               *data_ready_now = true;
+       } else {
+               /* We can't read right now because there is nothing in the
+                * queue. Ask for notifications when there is something to
+                * read.
+                */
+               if (sk->sk_state == SS_CONNECTED)
+                       vsock_block_update_write_window(sk);
+               *data_ready_now = false;
+       }
+
+       return 0;
+}
+
+static int
+vmci_transport_notify_pkt_poll_out(struct sock *sk,
+                                  size_t target, bool *space_avail_now)
+{
+       s64 produce_q_free_space;
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       produce_q_free_space = vsock_stream_has_space(vsk);
+       if (produce_q_free_space > 0) {
+               *space_avail_now = true;
+               return 0;
+       } else if (produce_q_free_space == 0) {
+               /* This is a connected socket but we can't currently send data.
+                * Nothing else to do.
+                */
+               *space_avail_now = false;
+       }
+
+       return 0;
+}
+
+static int
+vmci_transport_notify_pkt_recv_init(
+                               struct sock *sk,
+                               size_t target,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       data->consume_head = 0;
+       data->produce_tail = 0;
+       data->notify_on_block = false;
+
+       if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
+               PKT_FIELD(vsk, write_notify_min_window) = target + 1;
+               if (PKT_FIELD(vsk, write_notify_window) <
+                   PKT_FIELD(vsk, write_notify_min_window)) {
+                       /* If the current window is smaller than the new
+                        * minimal window size, we need to reevaluate whether
+                        * we need to notify the sender. If the number of ready
+                        * bytes is smaller than the new window, we need to
+                        * send a notification to the sender before we block.
+                        */
+
+                       PKT_FIELD(vsk, write_notify_window) =
+                           PKT_FIELD(vsk, write_notify_min_window);
+                       data->notify_on_block = true;
+               }
+       }
+
+       return 0;
+}
+
+static int
+vmci_transport_notify_pkt_recv_pre_block(
+                               struct sock *sk,
+                               size_t target,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       int err = 0;
+
+       vsock_block_update_write_window(sk);
+
+       if (data->notify_on_block) {
+               err = vmci_transport_send_read_notification(sk);
+               if (err < 0)
+                       return err;
+               data->notify_on_block = false;
+       }
+
+       return err;
+}
+
+static int
+vmci_transport_notify_pkt_recv_post_dequeue(
+                               struct sock *sk,
+                               size_t target,
+                               ssize_t copied,
+                               bool data_read,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       struct vsock_sock *vsk;
+       int err;
+       bool was_full = false;
+       u64 free_space;
+
+       vsk = vsock_sk(sk);
+       err = 0;
+
+       if (data_read) {
+               smp_mb();
+
+               free_space =
+                       vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
+               was_full = free_space == copied;
+
+               if (was_full)
+                       PKT_FIELD(vsk, peer_waiting_write) = true;
+
+               err = vmci_transport_send_read_notification(sk);
+               if (err < 0)
+                       return err;
+
+               /* See the comment in
+                * vmci_transport_notify_pkt_send_post_enqueue().
+                */
+               sk->sk_data_ready(sk, 0);
+       }
+
+       return err;
+}
+
+static int
+vmci_transport_notify_pkt_send_init(
+                               struct sock *sk,
+                               struct vmci_transport_send_notify_data *data)
+{
+       data->consume_head = 0;
+       data->produce_tail = 0;
+
+       return 0;
+}
+
+static int
+vmci_transport_notify_pkt_send_post_enqueue(
+                               struct sock *sk,
+                               ssize_t written,
+                               struct vmci_transport_send_notify_data *data)
+{
+       int err = 0;
+       struct vsock_sock *vsk;
+       bool sent_wrote = false;
+       bool was_empty;
+       int retries = 0;
+
+       vsk = vsock_sk(sk);
+
+       smp_mb();
+
+       was_empty =
+               vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
+       if (was_empty) {
+               while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
+                      !sent_wrote &&
+                      retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
+                       err = vmci_transport_send_wrote(sk);
+                       if (err >= 0)
+                               sent_wrote = true;
+
+                       retries++;
+               }
+       }
+
+       if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
+               pr_err("%p unable to send wrote notification to peer\n",
+                      sk);
+               return err;
+       }
+
+       return err;
+}
+
+static void
+vmci_transport_notify_pkt_handle_pkt(
+                               struct sock *sk,
+                               struct vmci_transport_packet *pkt,
+                               bool bottom_half,
+                               struct sockaddr_vm *dst,
+                               struct sockaddr_vm *src, bool *pkt_processed)
+{
+       bool processed = false;
+
+       switch (pkt->type) {
+       case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
+               vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
+               processed = true;
+               break;
+       case VMCI_TRANSPORT_PACKET_TYPE_READ:
+               vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
+               processed = true;
+               break;
+       }
+
+       if (pkt_processed)
+               *pkt_processed = processed;
+}
+
+static void vmci_transport_notify_pkt_process_request(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
+       if (vmci_trans(vsk)->consume_size <
+               PKT_FIELD(vsk, write_notify_min_window))
+               PKT_FIELD(vsk, write_notify_min_window) =
+                       vmci_trans(vsk)->consume_size;
+}
+
+static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
+{
+       struct vsock_sock *vsk = vsock_sk(sk);
+
+       PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
+       if (vmci_trans(vsk)->consume_size <
+               PKT_FIELD(vsk, write_notify_min_window))
+               PKT_FIELD(vsk, write_notify_min_window) =
+                       vmci_trans(vsk)->consume_size;
+}
+
+static int
+vmci_transport_notify_pkt_recv_pre_dequeue(
+                               struct sock *sk,
+                               size_t target,
+                               struct vmci_transport_recv_notify_data *data)
+{
+       return 0; /* NOP for QState. */
+}
+
+static int
+vmci_transport_notify_pkt_send_pre_block(
+                               struct sock *sk,
+                               struct vmci_transport_send_notify_data *data)
+{
+       return 0; /* NOP for QState. */
+}
+
+static int
+vmci_transport_notify_pkt_send_pre_enqueue(
+                               struct sock *sk,
+                               struct vmci_transport_send_notify_data *data)
+{
+       return 0; /* NOP for QState. */
+}
+
+/* Socket always on control packet based operations. */
+struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
+       vmci_transport_notify_pkt_socket_init,
+       vmci_transport_notify_pkt_socket_destruct,
+       vmci_transport_notify_pkt_poll_in,
+       vmci_transport_notify_pkt_poll_out,
+       vmci_transport_notify_pkt_handle_pkt,
+       vmci_transport_notify_pkt_recv_init,
+       vmci_transport_notify_pkt_recv_pre_block,
+       vmci_transport_notify_pkt_recv_pre_dequeue,
+       vmci_transport_notify_pkt_recv_post_dequeue,
+       vmci_transport_notify_pkt_send_init,
+       vmci_transport_notify_pkt_send_pre_block,
+       vmci_transport_notify_pkt_send_pre_enqueue,
+       vmci_transport_notify_pkt_send_post_enqueue,
+       vmci_transport_notify_pkt_process_request,
+       vmci_transport_notify_pkt_process_negotiate,
+};
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
new file mode 100644 (file)
index 0000000..b7df1ae
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/stddef.h>
+#include <net/sock.h>
+
+#include "vsock_addr.h"
+
+void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
+{
+       memset(addr, 0, sizeof(*addr));
+       addr->svm_family = AF_VSOCK;
+       addr->svm_cid = cid;
+       addr->svm_port = port;
+}
+EXPORT_SYMBOL_GPL(vsock_addr_init);
+
+int vsock_addr_validate(const struct sockaddr_vm *addr)
+{
+       if (!addr)
+               return -EFAULT;
+
+       if (addr->svm_family != AF_VSOCK)
+               return -EAFNOSUPPORT;
+
+       if (addr->svm_zero[0] != 0)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(vsock_addr_validate);
+
+bool vsock_addr_bound(const struct sockaddr_vm *addr)
+{
+       return addr->svm_port != VMADDR_PORT_ANY;
+}
+EXPORT_SYMBOL_GPL(vsock_addr_bound);
+
+void vsock_addr_unbind(struct sockaddr_vm *addr)
+{
+       vsock_addr_init(addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+}
+EXPORT_SYMBOL_GPL(vsock_addr_unbind);
+
+bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
+                           const struct sockaddr_vm *other)
+{
+       return addr->svm_cid == other->svm_cid &&
+               addr->svm_port == other->svm_port;
+}
+EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
+
+bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
+                               const struct sockaddr_vm *other)
+{
+       return (addr->svm_cid == VMADDR_CID_ANY ||
+               other->svm_cid == VMADDR_CID_ANY ||
+               addr->svm_cid == other->svm_cid) &&
+              addr->svm_port == other->svm_port;
+}
+EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
+
+int vsock_addr_cast(const struct sockaddr *addr,
+                   size_t len, struct sockaddr_vm **out_addr)
+{
+       if (len < sizeof(**out_addr))
+               return -EFAULT;
+
+       *out_addr = (struct sockaddr_vm *)addr;
+       return vsock_addr_validate(*out_addr);
+}
+EXPORT_SYMBOL_GPL(vsock_addr_cast);
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h
new file mode 100644 (file)
index 0000000..cdfbcef
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _VSOCK_ADDR_H_
+#define _VSOCK_ADDR_H_
+
+#include <linux/vm_sockets.h>
+
+void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
+int vsock_addr_validate(const struct sockaddr_vm *addr);
+bool vsock_addr_bound(const struct sockaddr_vm *addr);
+void vsock_addr_unbind(struct sockaddr_vm *addr);
+bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
+                           const struct sockaddr_vm *other);
+bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
+                               const struct sockaddr_vm *other);
+int vsock_addr_cast(const struct sockaddr *addr, size_t len,
+                   struct sockaddr_vm **out_addr);
+
+#endif
index 396373f..fd556ac 100644 (file)
@@ -147,6 +147,32 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
        }
 }
 
+static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+       int width;
+
+       switch (c->width) {
+       case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               width = 20;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               width = 40;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+       case NL80211_CHAN_WIDTH_80:
+               width = 80;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               width = 160;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return -1;
+       }
+       return width;
+}
+
 const struct cfg80211_chan_def *
 cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
                            const struct cfg80211_chan_def *c2)
@@ -192,6 +218,93 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
 }
 EXPORT_SYMBOL(cfg80211_chandef_compatible);
 
+static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq,
+                                        u32 bandwidth,
+                                        enum nl80211_dfs_state dfs_state)
+{
+       struct ieee80211_channel *c;
+       u32 freq;
+
+       for (freq = center_freq - bandwidth/2 + 10;
+            freq <= center_freq + bandwidth/2 - 10;
+            freq += 20) {
+               c = ieee80211_get_channel(wiphy, freq);
+               if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
+                       continue;
+
+               c->dfs_state = dfs_state;
+               c->dfs_state_entered = jiffies;
+       }
+}
+
+void cfg80211_set_dfs_state(struct wiphy *wiphy,
+                           const struct cfg80211_chan_def *chandef,
+                           enum nl80211_dfs_state dfs_state)
+{
+       int width;
+
+       if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+               return;
+
+       width = cfg80211_chandef_get_width(chandef);
+       if (width < 0)
+               return;
+
+       cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq1,
+                                    width, dfs_state);
+
+       if (!chandef->center_freq2)
+               return;
+       cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq2,
+                                    width, dfs_state);
+}
+
+static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
+                                           u32 center_freq,
+                                           u32 bandwidth)
+{
+       struct ieee80211_channel *c;
+       u32 freq;
+
+       for (freq = center_freq - bandwidth/2 + 10;
+            freq <= center_freq + bandwidth/2 - 10;
+            freq += 20) {
+               c = ieee80211_get_channel(wiphy, freq);
+               if (!c)
+                       return -EINVAL;
+
+               if (c->flags & IEEE80211_CHAN_RADAR)
+                       return 1;
+       }
+       return 0;
+}
+
+
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+                                 const struct cfg80211_chan_def *chandef)
+{
+       int width;
+       int r;
+
+       if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+               return -EINVAL;
+
+       width = cfg80211_chandef_get_width(chandef);
+       if (width < 0)
+               return -EINVAL;
+
+       r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1,
+                                           width);
+       if (r)
+               return r;
+
+       if (!chandef->center_freq2)
+               return 0;
+
+       return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
+                                              width);
+}
+
 static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
                                        u32 center_freq, u32 bandwidth,
                                        u32 prohibited_flags)
@@ -203,7 +316,16 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
             freq <= center_freq + bandwidth/2 - 10;
             freq += 20) {
                c = ieee80211_get_channel(wiphy, freq);
-               if (!c || c->flags & prohibited_flags)
+               if (!c)
+                       return false;
+
+               /* check for radar flags */
+               if ((prohibited_flags & c->flags & IEEE80211_CHAN_RADAR) &&
+                   (c->dfs_state != NL80211_DFS_AVAILABLE))
+                       return false;
+
+               /* check for the other flags */
+               if (c->flags & prohibited_flags & ~IEEE80211_CHAN_RADAR)
                        return false;
        }
 
@@ -253,6 +375,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        case NL80211_CHAN_WIDTH_80:
                if (!vht_cap->vht_supported)
                        return false;
+               prohibited_flags |= IEEE80211_CHAN_NO_80MHZ;
                width = 80;
                break;
        case NL80211_CHAN_WIDTH_160:
@@ -260,6 +383,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                        return false;
                if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
                        return false;
+               prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
                width = 160;
                break;
        default:
@@ -267,7 +391,16 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                return false;
        }
 
-       /* TODO: missing regulatory check on 80/160 bandwidth */
+       /*
+        * TODO: What if there are only certain 80/160/80+80 MHz channels
+        *       allowed by the driver, or only certain combinations?
+        *       For 40 MHz the driver can set the NO_HT40 flags, but for
+        *       80/160 MHz and in particular 80+80 MHz this isn't really
+        *       feasible and we only have NO_80MHZ/NO_160MHZ so far but
+        *       no way to cover 80+80 MHz or more complex restrictions.
+        *       Note that such restrictions also need to be advertised to
+        *       userspace, for example for P2P channel selection.
+        */
 
        if (width > 20)
                prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
@@ -344,7 +477,10 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                break;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               if (wdev->beacon_interval) {
+               if (wdev->cac_started) {
+                       *chan = wdev->channel;
+                       *chanmode = CHAN_MODE_SHARED;
+               } else if (wdev->beacon_interval) {
                        *chan = wdev->channel;
                        *chanmode = CHAN_MODE_SHARED;
                }
index 9245729..5ffff03 100644 (file)
@@ -324,6 +324,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        INIT_LIST_HEAD(&rdev->bss_list);
        INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
        INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+       INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
+                         cfg80211_dfs_channels_update_work);
 #ifdef CONFIG_CFG80211_WEXT
        rdev->wiphy.wext = &cfg80211_wext_handler;
 #endif
@@ -365,7 +367,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        rdev->wiphy.rts_threshold = (u32) -1;
        rdev->wiphy.coverage_class = 0;
 
-       rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
+       rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
+                              NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
 
        return &rdev->wiphy;
 }
@@ -478,6 +481,11 @@ int wiphy_register(struct wiphy *wiphy)
                           ETH_ALEN)))
                return -EINVAL;
 
+       if (WARN_ON(wiphy->max_acl_mac_addrs &&
+                   (!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME) ||
+                    !rdev->ops->set_mac_acl)))
+               return -EINVAL;
+
        if (wiphy->addresses)
                memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
 
@@ -690,6 +698,7 @@ void wiphy_unregister(struct wiphy *wiphy)
        flush_work(&rdev->scan_done_wk);
        cancel_work_sync(&rdev->conn_work);
        flush_work(&rdev->event_work);
+       cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
 
        if (rdev->wowlan && rdev->ops->set_wakeup)
                rdev_set_wakeup(rdev, false);
@@ -710,7 +719,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
                kfree(reg);
        }
        list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
-               cfg80211_put_bss(&scan->pub);
+               cfg80211_put_bss(&rdev->wiphy, &scan->pub);
        kfree(rdev);
 }
 
index 8396f76..3aec0e4 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
-#include <linux/kref.h>
 #include <linux/rbtree.h>
 #include <linux/debugfs.h>
 #include <linux/rfkill.h>
@@ -87,6 +86,8 @@ struct cfg80211_registered_device {
 
        struct cfg80211_wowlan *wowlan;
 
+       struct delayed_work dfs_update_channels_wk;
+
        /* must be last because of the way we do wiphy_priv(),
         * and it should at least be aligned to NETDEV_ALIGN */
        struct wiphy wiphy __aligned(NETDEV_ALIGN);
@@ -109,6 +110,9 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
        for (i = 0; i < rdev->wowlan->n_patterns; i++)
                kfree(rdev->wowlan->patterns[i].mask);
        kfree(rdev->wowlan->patterns);
+       if (rdev->wowlan->tcp && rdev->wowlan->tcp->sock)
+               sock_release(rdev->wowlan->tcp->sock);
+       kfree(rdev->wowlan->tcp);
        kfree(rdev->wowlan);
 }
 
@@ -124,9 +128,10 @@ static inline void assert_cfg80211_lock(void)
 
 struct cfg80211_internal_bss {
        struct list_head list;
+       struct list_head hidden_list;
        struct rb_node rbn;
        unsigned long ts;
-       struct kref ref;
+       unsigned long refcount;
        atomic_t hold;
 
        /* must be last because of priv member */
@@ -428,6 +433,22 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                                 enum cfg80211_chan_mode chanmode,
                                 u8 radar_detect);
 
+/**
+ * cfg80211_chandef_dfs_required - checks if radar detection is required
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
+ */
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+                                 const struct cfg80211_chan_def *c);
+
+void cfg80211_set_dfs_state(struct wiphy *wiphy,
+                           const struct cfg80211_chan_def *chandef,
+                           enum nl80211_dfs_state dfs_state);
+
+void cfg80211_dfs_channels_update_work(struct work_struct *work);
+
+
 static inline int
 cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                              struct wireless_dev *wdev,
@@ -454,6 +475,16 @@ cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
                                            chan, chanmode, 0);
 }
 
+static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
+{
+       unsigned long end = jiffies;
+
+       if (end >= start)
+               return jiffies_to_msecs(end - start);
+
+       return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+}
+
 void
 cfg80211_get_chan_state(struct wireless_dev *wdev,
                        struct ieee80211_channel **chan,
index 9b9551e..d80e471 100644 (file)
@@ -37,7 +37,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
 
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
        }
 
        cfg80211_hold_bss(bss_from_pub(bss));
@@ -182,7 +182,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
 
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
        }
 
        wdev->current_bss = NULL;
index 461e692..caddca3 100644 (file)
@@ -58,7 +58,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
         */
        if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
            cfg80211_sme_failed_reassoc(wdev)) {
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wiphy, bss);
                goto out;
        }
 
@@ -70,7 +70,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
                 * do not call connect_result() now because the
                 * sme will schedule work that does it later.
                 */
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wiphy, bss);
                goto out;
        }
 
@@ -108,7 +108,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
        if (wdev->current_bss &&
            ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
                wdev->current_bss = NULL;
                was_current = true;
        }
@@ -164,7 +164,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
            ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
                cfg80211_sme_disassoc(dev, wdev->current_bss);
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
                wdev->current_bss = NULL;
        } else
                WARN_ON(1);
@@ -324,7 +324,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
        err = rdev_auth(rdev, dev, &req);
 
 out:
-       cfg80211_put_bss(req.bss);
+       cfg80211_put_bss(&rdev->wiphy, req.bss);
        return err;
 }
 
@@ -432,7 +432,7 @@ out:
        if (err) {
                if (was_connected)
                        wdev->sme_state = CFG80211_SME_CONNECTED;
-               cfg80211_put_bss(req.bss);
+               cfg80211_put_bss(&rdev->wiphy, req.bss);
        }
 
        return err;
@@ -514,7 +514,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
        if (wdev->sme_state != CFG80211_SME_CONNECTED)
                return -ENOTCONN;
 
-       if (WARN_ON(!wdev->current_bss))
+       if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state))
                return -ENOTCONN;
 
        memset(&req, 0, sizeof(req));
@@ -572,7 +572,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
 
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub);
                wdev->current_bss = NULL;
        }
 }
@@ -987,3 +987,123 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
        nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
 }
 EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
+
+void cfg80211_dfs_channels_update_work(struct work_struct *work)
+{
+       struct delayed_work *delayed_work;
+       struct cfg80211_registered_device *rdev;
+       struct cfg80211_chan_def chandef;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *c;
+       struct wiphy *wiphy;
+       bool check_again = false;
+       unsigned long timeout, next_time = 0;
+       int bandid, i;
+
+       delayed_work = container_of(work, struct delayed_work, work);
+       rdev = container_of(delayed_work, struct cfg80211_registered_device,
+                           dfs_update_channels_wk);
+       wiphy = &rdev->wiphy;
+
+       mutex_lock(&cfg80211_mutex);
+       for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) {
+               sband = wiphy->bands[bandid];
+               if (!sband)
+                       continue;
+
+               for (i = 0; i < sband->n_channels; i++) {
+                       c = &sband->channels[i];
+
+                       if (c->dfs_state != NL80211_DFS_UNAVAILABLE)
+                               continue;
+
+                       timeout = c->dfs_state_entered +
+                                 IEEE80211_DFS_MIN_NOP_TIME_MS;
+
+                       if (time_after_eq(jiffies, timeout)) {
+                               c->dfs_state = NL80211_DFS_USABLE;
+                               cfg80211_chandef_create(&chandef, c,
+                                                       NL80211_CHAN_NO_HT);
+
+                               nl80211_radar_notify(rdev, &chandef,
+                                                    NL80211_RADAR_NOP_FINISHED,
+                                                    NULL, GFP_ATOMIC);
+                               continue;
+                       }
+
+                       if (!check_again)
+                               next_time = timeout - jiffies;
+                       else
+                               next_time = min(next_time, timeout - jiffies);
+                       check_again = true;
+               }
+       }
+       mutex_unlock(&cfg80211_mutex);
+
+       /* reschedule if there are other channels waiting to be cleared again */
+       if (check_again)
+               queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
+                                  next_time);
+}
+
+
+void cfg80211_radar_event(struct wiphy *wiphy,
+                         struct cfg80211_chan_def *chandef,
+                         gfp_t gfp)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       unsigned long timeout;
+
+       trace_cfg80211_radar_event(wiphy, chandef);
+
+       /* only set the chandef supplied channel to unavailable, in
+        * case the radar is detected on only one of multiple channels
+        * spanned by the chandef.
+        */
+       cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE);
+
+       timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
+       queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
+                          timeout);
+
+       nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
+}
+EXPORT_SYMBOL(cfg80211_radar_event);
+
+void cfg80211_cac_event(struct net_device *netdev,
+                       enum nl80211_radar_event event, gfp_t gfp)
+{
+       struct wireless_dev *wdev = netdev->ieee80211_ptr;
+       struct wiphy *wiphy = wdev->wiphy;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_chan_def chandef;
+       unsigned long timeout;
+
+       trace_cfg80211_cac_event(netdev, event);
+
+       if (WARN_ON(!wdev->cac_started))
+               return;
+
+       if (WARN_ON(!wdev->channel))
+               return;
+
+       cfg80211_chandef_create(&chandef, wdev->channel, NL80211_CHAN_NO_HT);
+
+       switch (event) {
+       case NL80211_RADAR_CAC_FINISHED:
+               timeout = wdev->cac_start_time +
+                         msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
+               WARN_ON(!time_after_eq(jiffies, timeout));
+               cfg80211_set_dfs_state(wiphy, &chandef, NL80211_DFS_AVAILABLE);
+               break;
+       case NL80211_RADAR_CAC_ABORTED:
+               break;
+       default:
+               WARN_ON(1);
+               return;
+       }
+       wdev->cac_started = false;
+
+       nl80211_radar_notify(rdev, &chandef, event, netdev, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cac_event);
index 33de803..580ffea 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/genetlink.h>
 #include <net/cfg80211.h>
 #include <net/sock.h>
+#include <net/inet_connection_sock.h>
 #include "core.h"
 #include "nl80211.h"
 #include "reg.h"
@@ -365,6 +366,10 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
        [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
        [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+       [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
+       [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
+       [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
+       [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, },
 };
 
 /* policy for the key attributes */
@@ -397,6 +402,26 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
        [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG },
        [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG },
        [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG },
+       [NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
+       [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 },
+       [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 },
+       [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN },
+       [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
+       [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
+       [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 },
+       [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
+               .len = sizeof(struct nl80211_wowlan_tcp_data_seq)
+       },
+       [NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN] = {
+               .len = sizeof(struct nl80211_wowlan_tcp_data_token)
+       },
+       [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
+       [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 },
+       [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
 };
 
 /* policy for GTK rekey offload attributes */
@@ -529,8 +554,27 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
        if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
            nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
                goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_RADAR) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+       if (chan->flags & IEEE80211_CHAN_RADAR) {
+               u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
+               if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+                       goto nla_put_failure;
+               if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
+                               chan->dfs_state))
+                       goto nla_put_failure;
+               if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
+                       goto nla_put_failure;
+       }
+       if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
+               goto nla_put_failure;
+       if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
+               goto nla_put_failure;
+       if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
+               goto nla_put_failure;
+       if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
                goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -870,6 +914,48 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
+#ifdef CONFIG_PM
+static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
+                                       struct sk_buff *msg)
+{
+       const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
+       struct nlattr *nl_tcp;
+
+       if (!tcp)
+               return 0;
+
+       nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
+       if (!nl_tcp)
+               return -ENOBUFS;
+
+       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
+                       tcp->data_payload_max))
+               return -ENOBUFS;
+
+       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
+                       tcp->data_payload_max))
+               return -ENOBUFS;
+
+       if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
+               return -ENOBUFS;
+
+       if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
+                               sizeof(*tcp->tok), tcp->tok))
+               return -ENOBUFS;
+
+       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
+                       tcp->data_interval_max))
+               return -ENOBUFS;
+
+       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
+                       tcp->wake_payload_max))
+               return -ENOBUFS;
+
+       nla_nest_end(msg, nl_tcp);
+       return 0;
+}
+#endif
+
 static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                              struct cfg80211_registered_device *dev)
 {
@@ -1236,12 +1322,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
                                        dev->wiphy.wowlan.pattern_min_len,
                                .max_pattern_len =
                                        dev->wiphy.wowlan.pattern_max_len,
+                               .max_pkt_offset =
+                                       dev->wiphy.wowlan.max_pkt_offset,
                        };
                        if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
                                    sizeof(pat), &pat))
                                goto nla_put_failure;
                }
 
+               if (nl80211_send_wowlan_tcp_caps(dev, msg))
+                       goto nla_put_failure;
+
                nla_nest_end(msg, nl_wowlan);
        }
 #endif
@@ -1268,6 +1359,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
                    dev->wiphy.ht_capa_mod_mask))
                goto nla_put_failure;
 
+       if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
+           dev->wiphy.max_acl_mac_addrs &&
+           nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
+                       dev->wiphy.max_acl_mac_addrs))
+               goto nla_put_failure;
+
+       if (dev->wiphy.extended_capabilities &&
+           (nla_put(msg, NL80211_ATTR_EXT_CAPA,
+                    dev->wiphy.extended_capabilities_len,
+                    dev->wiphy.extended_capabilities) ||
+            nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
+                    dev->wiphy.extended_capabilities_len,
+                    dev->wiphy.extended_capabilities_mask)))
+               goto nla_put_failure;
+
        return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -2491,6 +2597,97 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
        return err;
 }
 
+/* This function returns an error or the number of nested attributes */
+static int validate_acl_mac_addrs(struct nlattr *nl_attr)
+{
+       struct nlattr *attr;
+       int n_entries = 0, tmp;
+
+       nla_for_each_nested(attr, nl_attr, tmp) {
+               if (nla_len(attr) != ETH_ALEN)
+                       return -EINVAL;
+
+               n_entries++;
+       }
+
+       return n_entries;
+}
+
+/*
+ * This function parses ACL information and allocates memory for ACL data.
+ * On successful return, the calling function is responsible to free the
+ * ACL buffer returned by this function.
+ */
+static struct cfg80211_acl_data *parse_acl_data(struct wiphy *wiphy,
+                                               struct genl_info *info)
+{
+       enum nl80211_acl_policy acl_policy;
+       struct nlattr *attr;
+       struct cfg80211_acl_data *acl;
+       int i = 0, n_entries, tmp;
+
+       if (!wiphy->max_acl_mac_addrs)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       if (!info->attrs[NL80211_ATTR_ACL_POLICY])
+               return ERR_PTR(-EINVAL);
+
+       acl_policy = nla_get_u32(info->attrs[NL80211_ATTR_ACL_POLICY]);
+       if (acl_policy != NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+           acl_policy != NL80211_ACL_POLICY_DENY_UNLESS_LISTED)
+               return ERR_PTR(-EINVAL);
+
+       if (!info->attrs[NL80211_ATTR_MAC_ADDRS])
+               return ERR_PTR(-EINVAL);
+
+       n_entries = validate_acl_mac_addrs(info->attrs[NL80211_ATTR_MAC_ADDRS]);
+       if (n_entries < 0)
+               return ERR_PTR(n_entries);
+
+       if (n_entries > wiphy->max_acl_mac_addrs)
+               return ERR_PTR(-ENOTSUPP);
+
+       acl = kzalloc(sizeof(*acl) + (sizeof(struct mac_address) * n_entries),
+                     GFP_KERNEL);
+       if (!acl)
+               return ERR_PTR(-ENOMEM);
+
+       nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) {
+               memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN);
+               i++;
+       }
+
+       acl->n_acl_entries = n_entries;
+       acl->acl_policy = acl_policy;
+
+       return acl;
+}
+
+static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       struct cfg80211_acl_data *acl;
+       int err;
+
+       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+               return -EOPNOTSUPP;
+
+       if (!dev->ieee80211_ptr->beacon_interval)
+               return -EINVAL;
+
+       acl = parse_acl_data(&rdev->wiphy, info);
+       if (IS_ERR(acl))
+               return PTR_ERR(acl);
+
+       err = rdev_set_mac_acl(rdev, dev, acl);
+
+       kfree(acl);
+
+       return err;
+}
+
 static int nl80211_parse_beacon(struct genl_info *info,
                                struct cfg80211_beacon_data *bcn)
 {
@@ -2608,6 +2805,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_ap_settings params;
        int err;
+       u8 radar_detect_width = 0;
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -2726,14 +2924,30 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
                return -EINVAL;
 
+       err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
+       if (err < 0)
+               return err;
+       if (err) {
+               radar_detect_width = BIT(params.chandef.width);
+               params.radar_required = true;
+       }
+
        mutex_lock(&rdev->devlist_mtx);
-       err = cfg80211_can_use_chan(rdev, wdev, params.chandef.chan,
-                                   CHAN_MODE_SHARED);
+       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+                                          params.chandef.chan,
+                                          CHAN_MODE_SHARED,
+                                          radar_detect_width);
        mutex_unlock(&rdev->devlist_mtx);
 
        if (err)
                return err;
 
+       if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
+               params.acl = parse_acl_data(&rdev->wiphy, info);
+               if (IS_ERR(params.acl))
+                       return PTR_ERR(params.acl);
+       }
+
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
                wdev->preset_chandef = params.chandef;
@@ -2742,6 +2956,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                wdev->ssid_len = params.ssid_len;
                memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
        }
+
+       kfree(params.acl);
+
        return err;
 }
 
@@ -2949,12 +3166,22 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
            nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
                        sinfo->inactive_time))
                goto nla_put_failure;
-       if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
+       if ((sinfo->filled & (STATION_INFO_RX_BYTES |
+                             STATION_INFO_RX_BYTES64)) &&
            nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
-                       sinfo->rx_bytes))
+                       (u32)sinfo->rx_bytes))
                goto nla_put_failure;
-       if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
+       if ((sinfo->filled & (STATION_INFO_TX_BYTES |
+                             NL80211_STA_INFO_TX_BYTES64)) &&
            nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+                       (u32)sinfo->tx_bytes))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_RX_BYTES64) &&
+           nla_put_u64(msg, NL80211_STA_INFO_RX_BYTES64,
+                       sinfo->rx_bytes))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_TX_BYTES64) &&
+           nla_put_u64(msg, NL80211_STA_INFO_TX_BYTES64,
                        sinfo->tx_bytes))
                goto nla_put_failure;
        if ((sinfo->filled & STATION_INFO_LLID) &&
@@ -3182,6 +3409,63 @@ static struct net_device *get_vlan(struct genl_info *info,
        return ERR_PTR(ret);
 }
 
+static struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+       [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
+       [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
+};
+
+static int nl80211_set_station_tdls(struct genl_info *info,
+                                   struct station_parameters *params)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+       struct nlattr *nla;
+       int err;
+
+       /* Can only set if TDLS ... */
+       if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS))
+               return -EOPNOTSUPP;
+
+       /* ... with external setup is supported */
+       if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+               return -EOPNOTSUPP;
+
+       /* Dummy STA entry gets updated once the peer capabilities are known */
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
+               params->ht_capa =
+                       nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
+       if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
+               params->vht_capa =
+                       nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
+
+       /* parse WME attributes if present */
+       if (!info->attrs[NL80211_ATTR_STA_WME])
+               return 0;
+
+       nla = info->attrs[NL80211_ATTR_STA_WME];
+       err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+                              nl80211_sta_wme_policy);
+       if (err)
+               return err;
+
+       if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+               params->uapsd_queues = nla_get_u8(
+                       tb[NL80211_STA_WME_UAPSD_QUEUES]);
+       if (params->uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+               return -EINVAL;
+
+       if (tb[NL80211_STA_WME_MAX_SP])
+               params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+
+       if (params->max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+               return -EINVAL;
+
+       params->sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+
+       return 0;
+}
+
 static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3210,8 +3494,20 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                        nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
        }
 
-       if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL] ||
-           info->attrs[NL80211_ATTR_HT_CAPABILITY])
+       if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
+               params.capability =
+                       nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]);
+               params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY;
+       }
+
+       if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) {
+               params.ext_capab =
+                       nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
+               params.ext_capab_len =
+                       nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
+       }
+
+       if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
                return -EINVAL;
 
        if (!rdev->ops->change_station)
@@ -3280,6 +3576,13 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                /* reject other things that can't change */
                if (params.supported_rates)
                        return -EINVAL;
+               if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
+                       return -EINVAL;
+               if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
+                       return -EINVAL;
+               if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
+                   info->attrs[NL80211_ATTR_VHT_CAPABILITY])
+                       return -EINVAL;
 
                /* must be last in here for error handling */
                params.vlan = get_vlan(info, rdev);
@@ -3295,13 +3598,29 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                 * to change the flag.
                 */
                params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
-               /* fall through */
+               /* Include parameters for TDLS peer (driver will check) */
+               err = nl80211_set_station_tdls(info, &params);
+               if (err)
+                       return err;
+               /* disallow things sta doesn't support */
+               if (params.plink_action)
+                       return -EINVAL;
+               if (params.local_pm)
+                       return -EINVAL;
+               /* reject any changes other than AUTHORIZED or WME (for TDLS) */
+               if (params.sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+                                             BIT(NL80211_STA_FLAG_WME)))
+                       return -EINVAL;
+               break;
        case NL80211_IFTYPE_ADHOC:
                /* disallow things sta doesn't support */
                if (params.plink_action)
                        return -EINVAL;
                if (params.local_pm)
                        return -EINVAL;
+               if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
+                   info->attrs[NL80211_ATTR_VHT_CAPABILITY])
+                       return -EINVAL;
                /* reject any changes other than AUTHORIZED */
                if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
                        return -EINVAL;
@@ -3312,6 +3631,13 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
                if (params.supported_rates)
                        return -EINVAL;
+               if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
+                       return -EINVAL;
+               if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
+                       return -EINVAL;
+               if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
+                   info->attrs[NL80211_ATTR_VHT_CAPABILITY])
+                       return -EINVAL;
                /*
                 * No special handling for TDLS here -- the userspace
                 * mesh code doesn't have this bug.
@@ -3336,12 +3662,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
        return err;
 }
 
-static struct nla_policy
-nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
-       [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
-       [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
-};
-
 static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3376,6 +3696,19 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
        if (!params.aid || params.aid > IEEE80211_MAX_AID)
                return -EINVAL;
 
+       if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
+               params.capability =
+                       nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]);
+               params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY;
+       }
+
+       if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) {
+               params.ext_capab =
+                       nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
+               params.ext_capab_len =
+                       nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
+       }
+
        if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
                params.ht_capa =
                        nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
@@ -4869,6 +5202,54 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
        return err;
 }
 
+static int nl80211_start_radar_detection(struct sk_buff *skb,
+                                        struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_chan_def chandef;
+       int err;
+
+       err = nl80211_parse_chandef(rdev, info, &chandef);
+       if (err)
+               return err;
+
+       if (wdev->cac_started)
+               return -EBUSY;
+
+       err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef);
+       if (err < 0)
+               return err;
+
+       if (err == 0)
+               return -EINVAL;
+
+       if (chandef.chan->dfs_state != NL80211_DFS_USABLE)
+               return -EINVAL;
+
+       if (!rdev->ops->start_radar_detection)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&rdev->devlist_mtx);
+       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+                                          chandef.chan, CHAN_MODE_SHARED,
+                                          BIT(chandef.width));
+       if (err)
+               goto err_locked;
+
+       err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
+       if (!err) {
+               wdev->channel = chandef.chan;
+               wdev->cac_started = true;
+               wdev->cac_start_time = jiffies;
+       }
+err_locked:
+       mutex_unlock(&rdev->devlist_mtx);
+
+       return err;
+}
+
 static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
                            u32 seq, int flags,
                            struct cfg80211_registered_device *rdev,
@@ -4879,6 +5260,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
        const struct cfg80211_bss_ies *ies;
        void *hdr;
        struct nlattr *bss;
+       bool tsf = false;
 
        ASSERT_WDEV_LOCK(wdev);
 
@@ -4902,22 +5284,24 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
        rcu_read_lock();
        ies = rcu_dereference(res->ies);
-       if (ies && ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
-                                      ies->len, ies->data)) {
-               rcu_read_unlock();
-               goto nla_put_failure;
+       if (ies) {
+               if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+                       goto fail_unlock_rcu;
+               tsf = true;
+               if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
+                                       ies->len, ies->data))
+                       goto fail_unlock_rcu;
        }
        ies = rcu_dereference(res->beacon_ies);
-       if (ies && ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
-                                      ies->len, ies->data)) {
-               rcu_read_unlock();
-               goto nla_put_failure;
+       if (ies) {
+               if (!tsf && nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+                       goto fail_unlock_rcu;
+               if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
+                                       ies->len, ies->data))
+                       goto fail_unlock_rcu;
        }
        rcu_read_unlock();
 
-       if (res->tsf &&
-           nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
-               goto nla_put_failure;
        if (res->beacon_interval &&
            nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
                goto nla_put_failure;
@@ -4962,6 +5346,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
        return genlmsg_end(msg, hdr);
 
+ fail_unlock_rcu:
+       rcu_read_unlock();
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
@@ -6772,16 +7158,100 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
 }
 
 #ifdef CONFIG_PM
+static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
+                                       struct cfg80211_registered_device *rdev)
+{
+       struct nlattr *nl_pats, *nl_pat;
+       int i, pat_len;
+
+       if (!rdev->wowlan->n_patterns)
+               return 0;
+
+       nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN);
+       if (!nl_pats)
+               return -ENOBUFS;
+
+       for (i = 0; i < rdev->wowlan->n_patterns; i++) {
+               nl_pat = nla_nest_start(msg, i + 1);
+               if (!nl_pat)
+                       return -ENOBUFS;
+               pat_len = rdev->wowlan->patterns[i].pattern_len;
+               if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
+                           DIV_ROUND_UP(pat_len, 8),
+                           rdev->wowlan->patterns[i].mask) ||
+                   nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+                           pat_len, rdev->wowlan->patterns[i].pattern) ||
+                   nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
+                               rdev->wowlan->patterns[i].pkt_offset))
+                       return -ENOBUFS;
+               nla_nest_end(msg, nl_pat);
+       }
+       nla_nest_end(msg, nl_pats);
+
+       return 0;
+}
+
+static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
+                                  struct cfg80211_wowlan_tcp *tcp)
+{
+       struct nlattr *nl_tcp;
+
+       if (!tcp)
+               return 0;
+
+       nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
+       if (!nl_tcp)
+               return -ENOBUFS;
+
+       if (nla_put_be32(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
+           nla_put_be32(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
+           nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) ||
+           nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) ||
+           nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) ||
+           nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
+                   tcp->payload_len, tcp->payload) ||
+           nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
+                       tcp->data_interval) ||
+           nla_put(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
+                   tcp->wake_len, tcp->wake_data) ||
+           nla_put(msg, NL80211_WOWLAN_TCP_WAKE_MASK,
+                   DIV_ROUND_UP(tcp->wake_len, 8), tcp->wake_mask))
+               return -ENOBUFS;
+
+       if (tcp->payload_seq.len &&
+           nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ,
+                   sizeof(tcp->payload_seq), &tcp->payload_seq))
+               return -ENOBUFS;
+
+       if (tcp->payload_tok.len &&
+           nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
+                   sizeof(tcp->payload_tok) + tcp->tokens_size,
+                   &tcp->payload_tok))
+               return -ENOBUFS;
+
+       return 0;
+}
+
 static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct sk_buff *msg;
        void *hdr;
+       u32 size = NLMSG_DEFAULT_SIZE;
 
-       if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
+       if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
+           !rdev->wiphy.wowlan.tcp)
                return -EOPNOTSUPP;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (rdev->wowlan && rdev->wowlan->tcp) {
+               /* adjust size to have room for all the data */
+               size += rdev->wowlan->tcp->tokens_size +
+                       rdev->wowlan->tcp->payload_len +
+                       rdev->wowlan->tcp->wake_len +
+                       rdev->wowlan->tcp->wake_len / 8;
+       }
+
+       msg = nlmsg_new(size, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
@@ -6812,31 +7282,12 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
                    (rdev->wowlan->rfkill_release &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
                        goto nla_put_failure;
-               if (rdev->wowlan->n_patterns) {
-                       struct nlattr *nl_pats, *nl_pat;
-                       int i, pat_len;
 
-                       nl_pats = nla_nest_start(msg,
-                                       NL80211_WOWLAN_TRIG_PKT_PATTERN);
-                       if (!nl_pats)
-                               goto nla_put_failure;
+               if (nl80211_send_wowlan_patterns(msg, rdev))
+                       goto nla_put_failure;
 
-                       for (i = 0; i < rdev->wowlan->n_patterns; i++) {
-                               nl_pat = nla_nest_start(msg, i + 1);
-                               if (!nl_pat)
-                                       goto nla_put_failure;
-                               pat_len = rdev->wowlan->patterns[i].pattern_len;
-                               if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
-                                           DIV_ROUND_UP(pat_len, 8),
-                                           rdev->wowlan->patterns[i].mask) ||
-                                   nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-                                           pat_len,
-                                           rdev->wowlan->patterns[i].pattern))
-                                       goto nla_put_failure;
-                               nla_nest_end(msg, nl_pat);
-                       }
-                       nla_nest_end(msg, nl_pats);
-               }
+               if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp))
+                       goto nla_put_failure;
 
                nla_nest_end(msg, nl_wowlan);
        }
@@ -6849,6 +7300,150 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
+static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
+                                   struct nlattr *attr,
+                                   struct cfg80211_wowlan *trig)
+{
+       struct nlattr *tb[NUM_NL80211_WOWLAN_TCP];
+       struct cfg80211_wowlan_tcp *cfg;
+       struct nl80211_wowlan_tcp_data_token *tok = NULL;
+       struct nl80211_wowlan_tcp_data_seq *seq = NULL;
+       u32 size;
+       u32 data_size, wake_size, tokens_size = 0, wake_mask_size;
+       int err, port;
+
+       if (!rdev->wiphy.wowlan.tcp)
+               return -EINVAL;
+
+       err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP,
+                       nla_data(attr), nla_len(attr),
+                       nl80211_wowlan_tcp_policy);
+       if (err)
+               return err;
+
+       if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] ||
+           !tb[NL80211_WOWLAN_TCP_DST_IPV4] ||
+           !tb[NL80211_WOWLAN_TCP_DST_MAC] ||
+           !tb[NL80211_WOWLAN_TCP_DST_PORT] ||
+           !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] ||
+           !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] ||
+           !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] ||
+           !tb[NL80211_WOWLAN_TCP_WAKE_MASK])
+               return -EINVAL;
+
+       data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]);
+       if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max)
+               return -EINVAL;
+
+       if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
+                       rdev->wiphy.wowlan.tcp->data_interval_max)
+               return -EINVAL;
+
+       wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
+       if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max)
+               return -EINVAL;
+
+       wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]);
+       if (wake_mask_size != DIV_ROUND_UP(wake_size, 8))
+               return -EINVAL;
+
+       if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) {
+               u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
+
+               tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
+               tokens_size = tokln - sizeof(*tok);
+
+               if (!tok->len || tokens_size % tok->len)
+                       return -EINVAL;
+               if (!rdev->wiphy.wowlan.tcp->tok)
+                       return -EINVAL;
+               if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len)
+                       return -EINVAL;
+               if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len)
+                       return -EINVAL;
+               if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize)
+                       return -EINVAL;
+               if (tok->offset + tok->len > data_size)
+                       return -EINVAL;
+       }
+
+       if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) {
+               seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]);
+               if (!rdev->wiphy.wowlan.tcp->seq)
+                       return -EINVAL;
+               if (seq->len == 0 || seq->len > 4)
+                       return -EINVAL;
+               if (seq->len + seq->offset > data_size)
+                       return -EINVAL;
+       }
+
+       size = sizeof(*cfg);
+       size += data_size;
+       size += wake_size + wake_mask_size;
+       size += tokens_size;
+
+       cfg = kzalloc(size, GFP_KERNEL);
+       if (!cfg)
+               return -ENOMEM;
+       cfg->src = nla_get_be32(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
+       cfg->dst = nla_get_be32(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
+       memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
+              ETH_ALEN);
+       if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
+               port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]);
+       else
+               port = 0;
+#ifdef CONFIG_INET
+       /* allocate a socket and port for it and use it */
+       err = __sock_create(wiphy_net(&rdev->wiphy), PF_INET, SOCK_STREAM,
+                           IPPROTO_TCP, &cfg->sock, 1);
+       if (err) {
+               kfree(cfg);
+               return err;
+       }
+       if (inet_csk_get_port(cfg->sock->sk, port)) {
+               sock_release(cfg->sock);
+               kfree(cfg);
+               return -EADDRINUSE;
+       }
+       cfg->src_port = inet_sk(cfg->sock->sk)->inet_num;
+#else
+       if (!port) {
+               kfree(cfg);
+               return -EINVAL;
+       }
+       cfg->src_port = port;
+#endif
+
+       cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]);
+       cfg->payload_len = data_size;
+       cfg->payload = (u8 *)cfg + sizeof(*cfg) + tokens_size;
+       memcpy((void *)cfg->payload,
+              nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]),
+              data_size);
+       if (seq)
+               cfg->payload_seq = *seq;
+       cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]);
+       cfg->wake_len = wake_size;
+       cfg->wake_data = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size;
+       memcpy((void *)cfg->wake_data,
+              nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]),
+              wake_size);
+       cfg->wake_mask = (u8 *)cfg + sizeof(*cfg) + tokens_size +
+                        data_size + wake_size;
+       memcpy((void *)cfg->wake_mask,
+              nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]),
+              wake_mask_size);
+       if (tok) {
+               cfg->tokens_size = tokens_size;
+               memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size);
+       }
+
+       trig->tcp = cfg;
+
+       return 0;
+}
+
 static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6859,7 +7454,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        int err, i;
        bool prev_enabled = rdev->wowlan;
 
-       if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
+       if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
+           !rdev->wiphy.wowlan.tcp)
                return -EOPNOTSUPP;
 
        if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
@@ -6923,7 +7519,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
                struct nlattr *pat;
                int n_patterns = 0;
-               int rem, pat_len, mask_len;
+               int rem, pat_len, mask_len, pkt_offset;
                struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
 
                nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
@@ -6958,6 +7554,15 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                            pat_len < wowlan->pattern_min_len)
                                goto error;
 
+                       if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
+                               pkt_offset = 0;
+                       else
+                               pkt_offset = nla_get_u32(
+                                       pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
+                       if (pkt_offset > wowlan->max_pkt_offset)
+                               goto error;
+                       new_triggers.patterns[i].pkt_offset = pkt_offset;
+
                        new_triggers.patterns[i].mask =
                                kmalloc(mask_len + pat_len, GFP_KERNEL);
                        if (!new_triggers.patterns[i].mask) {
@@ -6977,6 +7582,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
+       if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
+               err = nl80211_parse_wowlan_tcp(
+                       rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
+                       &new_triggers);
+               if (err)
+                       goto error;
+       }
+
        ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
        if (!ntrig) {
                err = -ENOMEM;
@@ -6994,6 +7607,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        for (i = 0; i < new_triggers.n_patterns; i++)
                kfree(new_triggers.patterns[i].mask);
        kfree(new_triggers.patterns);
+       if (new_triggers.tcp && new_triggers.tcp->sock)
+               sock_release(new_triggers.tcp->sock);
+       kfree(new_triggers.tcp);
        return err;
 }
 #endif
@@ -7876,6 +8492,22 @@ static struct genl_ops nl80211_ops[] = {
                .internal_flags = NL80211_FLAG_NEED_NETDEV |
                                  NL80211_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL80211_CMD_SET_MAC_ACL,
+               .doit = nl80211_set_mac_acl,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_RADAR_DETECT,
+               .doit = nl80211_start_radar_detection,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -9073,6 +9705,57 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
 }
 
 void
+nl80211_radar_notify(struct cfg80211_registered_device *rdev,
+                    struct cfg80211_chan_def *chandef,
+                    enum nl80211_radar_event event,
+                    struct net_device *netdev, gfp_t gfp)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT);
+       if (!hdr) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+               goto nla_put_failure;
+
+       /* NOP and radar events don't need a netdev parameter */
+       if (netdev) {
+               struct wireless_dev *wdev = netdev->ieee80211_ptr;
+
+               if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+                   nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+                       goto nla_put_failure;
+       }
+
+       if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event))
+               goto nla_put_failure;
+
+       if (nl80211_send_chandef(msg, chandef))
+               goto nla_put_failure;
+
+       if (genlmsg_end(msg, hdr) < 0) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+                               nl80211_mlme_mcgrp.id, gfp);
+       return;
+
+ nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       nlmsg_free(msg);
+}
+
+void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
                                struct net_device *netdev, const u8 *peer,
                                u32 num_packets, gfp_t gfp)
@@ -9207,6 +9890,114 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_report_obss_beacon);
 
+#ifdef CONFIG_PM
+void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
+                                  struct cfg80211_wowlan_wakeup *wakeup,
+                                  gfp_t gfp)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct sk_buff *msg;
+       void *hdr;
+       int err, size = 200;
+
+       trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup);
+
+       if (wakeup)
+               size += wakeup->packet_present_len;
+
+       msg = nlmsg_new(size, gfp);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_WOWLAN);
+       if (!hdr)
+               goto free_msg;
+
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+               goto free_msg;
+
+       if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+                                       wdev->netdev->ifindex))
+               goto free_msg;
+
+       if (wakeup) {
+               struct nlattr *reasons;
+
+               reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
+
+               if (wakeup->disconnect &&
+                   nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT))
+                       goto free_msg;
+               if (wakeup->magic_pkt &&
+                   nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT))
+                       goto free_msg;
+               if (wakeup->gtk_rekey_failure &&
+                   nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE))
+                       goto free_msg;
+               if (wakeup->eap_identity_req &&
+                   nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST))
+                       goto free_msg;
+               if (wakeup->four_way_handshake &&
+                   nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE))
+                       goto free_msg;
+               if (wakeup->rfkill_release &&
+                   nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))
+                       goto free_msg;
+
+               if (wakeup->pattern_idx >= 0 &&
+                   nla_put_u32(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+                               wakeup->pattern_idx))
+                       goto free_msg;
+
+               if (wakeup->tcp_match)
+                       nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH);
+
+               if (wakeup->tcp_connlost)
+                       nla_put_flag(msg,
+                                    NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST);
+
+               if (wakeup->tcp_nomoretokens)
+                       nla_put_flag(msg,
+                               NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS);
+
+               if (wakeup->packet) {
+                       u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
+                       u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
+
+                       if (!wakeup->packet_80211) {
+                               pkt_attr =
+                                       NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023;
+                               len_attr =
+                                       NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN;
+                       }
+
+                       if (wakeup->packet_len &&
+                           nla_put_u32(msg, len_attr, wakeup->packet_len))
+                               goto free_msg;
+
+                       if (nla_put(msg, pkt_attr, wakeup->packet_present_len,
+                                   wakeup->packet))
+                               goto free_msg;
+               }
+
+               nla_nest_end(msg, reasons);
+       }
+
+       err = genlmsg_end(msg, hdr);
+       if (err < 0)
+               goto free_msg;
+
+       genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+                               nl80211_mlme_mcgrp.id, gfp);
+       return;
+
+ free_msg:
+       nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_wowlan_wakeup);
+#endif
+
 void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
                                enum nl80211_tdls_operation oper,
                                u16 reason_code, gfp_t gfp)
index 2acba84..b061da4 100644 (file)
@@ -108,6 +108,13 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev,
                             enum nl80211_cqm_rssi_threshold_event rssi_event,
                             gfp_t gfp);
+
+void
+nl80211_radar_notify(struct cfg80211_registered_device *rdev,
+                    struct cfg80211_chan_def *chandef,
+                    enum nl80211_radar_event event,
+                    struct net_device *netdev, gfp_t gfp);
+
 void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
                                struct net_device *netdev, const u8 *peer,
index 6c0c819..422d382 100644 (file)
@@ -875,4 +875,16 @@ static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev,
        rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
        trace_rdev_return_void(&rdev->wiphy);
 }                                      
+
+static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev,
+                                  struct net_device *dev,
+                                  struct cfg80211_acl_data *params)
+{
+       int ret;
+
+       trace_rdev_set_mac_acl(&rdev->wiphy, dev, params);
+       ret = rdev->ops->set_mac_acl(&rdev->wiphy, dev, params);
+       trace_rdev_return_int(&rdev->wiphy, ret);
+       return ret;
+}
 #endif /* __CFG80211_RDEV_OPS */
index de02d63..98532c0 100644 (file)
@@ -866,6 +866,10 @@ static void handle_channel(struct wiphy *wiphy,
 
        if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
                bw_flags = IEEE80211_CHAN_NO_HT40;
+       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+               bw_flags |= IEEE80211_CHAN_NO_80MHZ;
+       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+               bw_flags |= IEEE80211_CHAN_NO_160MHZ;
 
        if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
            request_wiphy && request_wiphy == wiphy &&
@@ -884,6 +888,9 @@ static void handle_channel(struct wiphy *wiphy,
                return;
        }
 
+       chan->dfs_state = NL80211_DFS_USABLE;
+       chan->dfs_state_entered = jiffies;
+
        chan->beacon_found = false;
        chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
        chan->max_antenna_gain =
@@ -1261,6 +1268,10 @@ static void handle_channel_custom(struct wiphy *wiphy,
 
        if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
                bw_flags = IEEE80211_CHAN_NO_HT40;
+       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+               bw_flags |= IEEE80211_CHAN_NO_80MHZ;
+       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+               bw_flags |= IEEE80211_CHAN_NO_160MHZ;
 
        chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
        chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
@@ -2189,10 +2200,15 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                 * However if a driver requested this specific regulatory
                 * domain we keep it for its private use
                 */
-               if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER)
+               if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+                       const struct ieee80211_regdomain *tmp;
+
+                       tmp = get_wiphy_regdom(request_wiphy);
                        rcu_assign_pointer(request_wiphy->regd, rd);
-               else
+                       rcu_free_regdom(tmp);
+               } else {
                        kfree(rd);
+               }
 
                rd = NULL;
 
index 01592d7..674aadc 100644 (file)
 #include "wext-compat.h"
 #include "rdev-ops.h"
 
+/**
+ * DOC: BSS tree/list structure
+ *
+ * At the top level, the BSS list is kept in both a list in each
+ * registered device (@bss_list) as well as an RB-tree for faster
+ * lookup. In the RB-tree, entries can be looked up using their
+ * channel, MESHID, MESHCONF (for MBSSes) or channel, BSSID, SSID
+ * for other BSSes.
+ *
+ * Due to the possibility of hidden SSIDs, there's a second level
+ * structure, the "hidden_list" and "hidden_beacon_bss" pointer.
+ * The hidden_list connects all BSSes belonging to a single AP
+ * that has a hidden SSID, and connects beacon and probe response
+ * entries. For a probe response entry for a hidden SSID, the
+ * hidden_beacon_bss pointer points to the BSS struct holding the
+ * beacon's information.
+ *
+ * Reference counting is done for all these references except for
+ * the hidden_list, so that a beacon BSS struct that is otherwise
+ * not referenced has one reference for being on the bss_list and
+ * one for each probe response entry that points to it using the
+ * hidden_beacon_bss pointer. When a BSS struct that has such a
+ * pointer is get/put, the refcount update is also propagated to
+ * the referenced struct, this ensures that it cannot get removed
+ * while somebody is using the probe response version.
+ *
+ * Note that the hidden_beacon_bss pointer never changes, due to
+ * the reference counting. Therefore, no locking is needed for
+ * it.
+ *
+ * Also note that the hidden_beacon_bss pointer is only relevant
+ * if the driver uses something other than the IEs, e.g. private
+ * data stored in the BSS struct, since the beacon IEs are
+ * also linked into the probe response struct.
+ */
+
 #define IEEE80211_SCAN_RESULT_EXPIRE   (30 * HZ)
 
-static void bss_release(struct kref *ref)
+static void bss_free(struct cfg80211_internal_bss *bss)
 {
        struct cfg80211_bss_ies *ies;
-       struct cfg80211_internal_bss *bss;
-
-       bss = container_of(ref, struct cfg80211_internal_bss, ref);
 
        if (WARN_ON(atomic_read(&bss->hold)))
                return;
 
-       if (bss->pub.free_priv)
-               bss->pub.free_priv(&bss->pub);
-
        ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
-       if (ies)
+       if (ies && !bss->pub.hidden_beacon_bss)
                kfree_rcu(ies, rcu_head);
        ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
        if (ies)
                kfree_rcu(ies, rcu_head);
 
+       /*
+        * This happens when the module is removed, it doesn't
+        * really matter any more save for completeness
+        */
+       if (!list_empty(&bss->hidden_list))
+               list_del(&bss->hidden_list);
+
        kfree(bss);
 }
 
-/* must hold dev->bss_lock! */
-static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
+static inline void bss_ref_get(struct cfg80211_registered_device *dev,
+                              struct cfg80211_internal_bss *bss)
+{
+       lockdep_assert_held(&dev->bss_lock);
+
+       bss->refcount++;
+       if (bss->pub.hidden_beacon_bss) {
+               bss = container_of(bss->pub.hidden_beacon_bss,
+                                  struct cfg80211_internal_bss,
+                                  pub);
+               bss->refcount++;
+       }
+}
+
+static inline void bss_ref_put(struct cfg80211_registered_device *dev,
+                              struct cfg80211_internal_bss *bss)
+{
+       lockdep_assert_held(&dev->bss_lock);
+
+       if (bss->pub.hidden_beacon_bss) {
+               struct cfg80211_internal_bss *hbss;
+               hbss = container_of(bss->pub.hidden_beacon_bss,
+                                   struct cfg80211_internal_bss,
+                                   pub);
+               hbss->refcount--;
+               if (hbss->refcount == 0)
+                       bss_free(hbss);
+       }
+       bss->refcount--;
+       if (bss->refcount == 0)
+               bss_free(bss);
+}
+
+static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
                                  struct cfg80211_internal_bss *bss)
 {
+       lockdep_assert_held(&dev->bss_lock);
+
+       if (!list_empty(&bss->hidden_list)) {
+               /*
+                * don't remove the beacon entry if it has
+                * probe responses associated with it
+                */
+               if (!bss->pub.hidden_beacon_bss)
+                       return false;
+               /*
+                * if it's a probe response entry break its
+                * link to the other entries in the group
+                */
+               list_del_init(&bss->hidden_list);
+       }
+
        list_del_init(&bss->list);
        rb_erase(&bss->rbn, &dev->bss_tree);
-       kref_put(&bss->ref, bss_release);
+       bss_ref_put(dev, bss);
+       return true;
 }
 
-/* must hold dev->bss_lock! */
 static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
                                  unsigned long expire_time)
 {
        struct cfg80211_internal_bss *bss, *tmp;
        bool expired = false;
 
+       lockdep_assert_held(&dev->bss_lock);
+
        list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
                if (atomic_read(&bss->hold))
                        continue;
                if (!time_after(expire_time, bss->ts))
                        continue;
 
-               __cfg80211_unlink_bss(dev, bss);
-               expired = true;
+               if (__cfg80211_unlink_bss(dev, bss))
+                       expired = true;
        }
 
        if (expired)
@@ -234,15 +321,16 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
        return 0;
 }
 
-/* must hold dev->bss_lock! */
 void cfg80211_bss_age(struct cfg80211_registered_device *dev,
                       unsigned long age_secs)
 {
        struct cfg80211_internal_bss *bss;
        unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
 
+       spin_lock_bh(&dev->bss_lock);
        list_for_each_entry(bss, &dev->bss_list, list)
                bss->ts -= age_jiffies;
+       spin_unlock_bh(&dev->bss_lock);
 }
 
 void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
@@ -277,40 +365,24 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
                if (!pos)
                        return NULL;
 
-               if (end - pos < sizeof(*ie))
-                       return NULL;
-
                ie = (struct ieee80211_vendor_ie *)pos;
+
+               /* make sure we can access ie->len */
+               BUILD_BUG_ON(offsetof(struct ieee80211_vendor_ie, len) != 1);
+
+               if (ie->len < sizeof(*ie))
+                       goto cont;
+
                ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
                if (ie_oui == oui && ie->oui_type == oui_type)
                        return pos;
-
+cont:
                pos += 2 + ie->len;
        }
        return NULL;
 }
 EXPORT_SYMBOL(cfg80211_find_vendor_ie);
 
-static int cmp_ies(u8 num, const u8 *ies1, int len1, const u8 *ies2, int len2)
-{
-       const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
-       const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
-
-       /* equal if both missing */
-       if (!ie1 && !ie2)
-               return 0;
-       /* sort missing IE before (left of) present IE */
-       if (!ie1)
-               return -1;
-       if (!ie2)
-               return 1;
-
-       /* sort by length first, then by contents */
-       if (ie1[1] != ie2[1])
-               return ie2[1] - ie1[1];
-       return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
-}
-
 static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
                   const u8 *ssid, size_t ssid_len)
 {
@@ -334,109 +406,30 @@ static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
        return memcmp(ssidie + 2, ssid, ssid_len) == 0;
 }
 
-static bool is_mesh_bss(struct cfg80211_bss *a)
-{
-       const struct cfg80211_bss_ies *ies;
-       const u8 *ie;
-
-       if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
-               return false;
-
-       ies = rcu_access_pointer(a->ies);
-       if (!ies)
-               return false;
-
-       ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
-       if (!ie)
-               return false;
-
-       ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
-       if (!ie)
-               return false;
-
-       return true;
-}
-
-static bool is_mesh(struct cfg80211_bss *a,
-                   const u8 *meshid, size_t meshidlen,
-                   const u8 *meshcfg)
-{
-       const struct cfg80211_bss_ies *ies;
-       const u8 *ie;
-
-       if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
-               return false;
-
-       ies = rcu_access_pointer(a->ies);
-       if (!ies)
-               return false;
-
-       ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
-       if (!ie)
-               return false;
-       if (ie[1] != meshidlen)
-               return false;
-       if (memcmp(ie + 2, meshid, meshidlen))
-               return false;
-
-       ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
-       if (!ie)
-               return false;
-       if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
-               return false;
-
-       /*
-        * Ignore mesh capability (last two bytes of the IE) when
-        * comparing since that may differ between stations taking
-        * part in the same mesh.
-        */
-       return memcmp(ie + 2, meshcfg,
-                     sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
-}
+/**
+ * enum bss_compare_mode - BSS compare mode
+ * @BSS_CMP_REGULAR: regular compare mode (for insertion and normal find)
+ * @BSS_CMP_HIDE_ZLEN: find hidden SSID with zero-length mode
+ * @BSS_CMP_HIDE_NUL: find hidden SSID with NUL-ed out mode
+ */
+enum bss_compare_mode {
+       BSS_CMP_REGULAR,
+       BSS_CMP_HIDE_ZLEN,
+       BSS_CMP_HIDE_NUL,
+};
 
-static int cmp_bss_core(struct cfg80211_bss *a, struct cfg80211_bss *b)
+static int cmp_bss(struct cfg80211_bss *a,
+                  struct cfg80211_bss *b,
+                  enum bss_compare_mode mode)
 {
        const struct cfg80211_bss_ies *a_ies, *b_ies;
-       int r;
+       const u8 *ie1 = NULL;
+       const u8 *ie2 = NULL;
+       int i, r;
 
        if (a->channel != b->channel)
                return b->channel->center_freq - a->channel->center_freq;
 
-       if (is_mesh_bss(a) && is_mesh_bss(b)) {
-               a_ies = rcu_access_pointer(a->ies);
-               if (!a_ies)
-                       return -1;
-               b_ies = rcu_access_pointer(b->ies);
-               if (!b_ies)
-                       return 1;
-
-               r = cmp_ies(WLAN_EID_MESH_ID,
-                           a_ies->data, a_ies->len,
-                           b_ies->data, b_ies->len);
-               if (r)
-                       return r;
-               return cmp_ies(WLAN_EID_MESH_CONFIG,
-                              a_ies->data, a_ies->len,
-                              b_ies->data, b_ies->len);
-       }
-
-       /*
-        * we can't use compare_ether_addr here since we need a < > operator.
-        * The binary return value of compare_ether_addr isn't enough
-        */
-       return memcmp(a->bssid, b->bssid, sizeof(a->bssid));
-}
-
-static int cmp_bss(struct cfg80211_bss *a,
-                  struct cfg80211_bss *b)
-{
-       const struct cfg80211_bss_ies *a_ies, *b_ies;
-       int r;
-
-       r = cmp_bss_core(a, b);
-       if (r)
-               return r;
-
        a_ies = rcu_access_pointer(a->ies);
        if (!a_ies)
                return -1;
@@ -444,42 +437,51 @@ static int cmp_bss(struct cfg80211_bss *a,
        if (!b_ies)
                return 1;
 
-       return cmp_ies(WLAN_EID_SSID,
-                      a_ies->data, a_ies->len,
-                      b_ies->data, b_ies->len);
-}
-
-static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b)
-{
-       const struct cfg80211_bss_ies *a_ies, *b_ies;
-       const u8 *ie1;
-       const u8 *ie2;
-       int i;
-       int r;
+       if (WLAN_CAPABILITY_IS_STA_BSS(a->capability))
+               ie1 = cfg80211_find_ie(WLAN_EID_MESH_ID,
+                                      a_ies->data, a_ies->len);
+       if (WLAN_CAPABILITY_IS_STA_BSS(b->capability))
+               ie2 = cfg80211_find_ie(WLAN_EID_MESH_ID,
+                                      b_ies->data, b_ies->len);
+       if (ie1 && ie2) {
+               int mesh_id_cmp;
+
+               if (ie1[1] == ie2[1])
+                       mesh_id_cmp = memcmp(ie1 + 2, ie2 + 2, ie1[1]);
+               else
+                       mesh_id_cmp = ie2[1] - ie1[1];
+
+               ie1 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+                                      a_ies->data, a_ies->len);
+               ie2 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+                                      b_ies->data, b_ies->len);
+               if (ie1 && ie2) {
+                       if (mesh_id_cmp)
+                               return mesh_id_cmp;
+                       if (ie1[1] != ie2[1])
+                               return ie2[1] - ie1[1];
+                       return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
+               }
+       }
 
-       r = cmp_bss_core(a, b);
+       /*
+        * we can't use compare_ether_addr here since we need a < > operator.
+        * The binary return value of compare_ether_addr isn't enough
+        */
+       r = memcmp(a->bssid, b->bssid, sizeof(a->bssid));
        if (r)
                return r;
 
-       a_ies = rcu_access_pointer(a->ies);
-       if (!a_ies)
-               return -1;
-       b_ies = rcu_access_pointer(b->ies);
-       if (!b_ies)
-               return 1;
-
        ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len);
        ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len);
 
+       if (!ie1 && !ie2)
+               return 0;
+
        /*
-        * Key comparator must use same algorithm in any rb-tree
-        * search function (order is important), otherwise ordering
-        * of items in the tree is broken and search gives incorrect
-        * results. This code uses same order as cmp_ies() does.
-        *
-        * Note that due to the differring behaviour with hidden SSIDs
-        * this function only works when "b" is the tree element and
-        * "a" is the key we're looking for.
+        * Note that with "hide_ssid", the function returns a match if
+        * the already-present BSS ("b") is a hidden SSID beacon for
+        * the new BSS ("a").
         */
 
        /* sort missing IE before (left of) present IE */
@@ -488,24 +490,36 @@ static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b)
        if (!ie2)
                return 1;
 
-       /* zero-size SSID is used as an indication of the hidden bss */
-       if (!ie2[1])
+       switch (mode) {
+       case BSS_CMP_HIDE_ZLEN:
+               /*
+                * In ZLEN mode we assume the BSS entry we're
+                * looking for has a zero-length SSID. So if
+                * the one we're looking at right now has that,
+                * return 0. Otherwise, return the difference
+                * in length, but since we're looking for the
+                * 0-length it's really equivalent to returning
+                * the length of the one we're looking at.
+                *
+                * No content comparison is needed as we assume
+                * the content length is zero.
+                */
+               return ie2[1];
+       case BSS_CMP_REGULAR:
+       default:
+               /* sort by length first, then by contents */
+               if (ie1[1] != ie2[1])
+                       return ie2[1] - ie1[1];
+               return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
+       case BSS_CMP_HIDE_NUL:
+               if (ie1[1] != ie2[1])
+                       return ie2[1] - ie1[1];
+               /* this is equivalent to memcmp(zeroes, ie2 + 2, len) */
+               for (i = 0; i < ie2[1]; i++)
+                       if (ie2[i + 2])
+                               return -1;
                return 0;
-
-       /* sort by length first, then by contents */
-       if (ie1[1] != ie2[1])
-               return ie2[1] - ie1[1];
-
-       /*
-        * zeroed SSID ie is another indication of a hidden bss;
-        * if it isn't zeroed just return the regular sort value
-        * to find the next candidate
-        */
-       for (i = 0; i < ie2[1]; i++)
-               if (ie2[i + 2])
-                       return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
-
-       return 0;
+       }
 }
 
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
@@ -534,7 +548,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                        continue;
                if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
                        res = bss;
-                       kref_get(&res->ref);
+                       bss_ref_get(dev, res);
                        break;
                }
        }
@@ -547,34 +561,6 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_get_bss);
 
-struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
-                                      struct ieee80211_channel *channel,
-                                      const u8 *meshid, size_t meshidlen,
-                                      const u8 *meshcfg)
-{
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
-       struct cfg80211_internal_bss *bss, *res = NULL;
-
-       spin_lock_bh(&dev->bss_lock);
-
-       list_for_each_entry(bss, &dev->bss_list, list) {
-               if (channel && bss->pub.channel != channel)
-                       continue;
-               if (is_mesh(&bss->pub, meshid, meshidlen, meshcfg)) {
-                       res = bss;
-                       kref_get(&res->ref);
-                       break;
-               }
-       }
-
-       spin_unlock_bh(&dev->bss_lock);
-       if (!res)
-               return NULL;
-       return &res->pub;
-}
-EXPORT_SYMBOL(cfg80211_get_mesh);
-
-
 static void rb_insert_bss(struct cfg80211_registered_device *dev,
                          struct cfg80211_internal_bss *bss)
 {
@@ -587,7 +573,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
                parent = *p;
                tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn);
 
-               cmp = cmp_bss(&bss->pub, &tbss->pub);
+               cmp = cmp_bss(&bss->pub, &tbss->pub, BSS_CMP_REGULAR);
 
                if (WARN_ON(!cmp)) {
                        /* will sort of leak this BSS */
@@ -606,7 +592,8 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
 
 static struct cfg80211_internal_bss *
 rb_find_bss(struct cfg80211_registered_device *dev,
-           struct cfg80211_internal_bss *res)
+           struct cfg80211_internal_bss *res,
+           enum bss_compare_mode mode)
 {
        struct rb_node *n = dev->bss_tree.rb_node;
        struct cfg80211_internal_bss *bss;
@@ -614,7 +601,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
 
        while (n) {
                bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
-               r = cmp_bss(&res->pub, &bss->pub);
+               r = cmp_bss(&res->pub, &bss->pub, mode);
 
                if (r == 0)
                        return bss;
@@ -627,46 +614,67 @@ rb_find_bss(struct cfg80211_registered_device *dev,
        return NULL;
 }
 
-static struct cfg80211_internal_bss *
-rb_find_hidden_bss(struct cfg80211_registered_device *dev,
-                  struct cfg80211_internal_bss *res)
+static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+                                  struct cfg80211_internal_bss *new)
 {
-       struct rb_node *n = dev->bss_tree.rb_node;
+       const struct cfg80211_bss_ies *ies;
        struct cfg80211_internal_bss *bss;
-       int r;
+       const u8 *ie;
+       int i, ssidlen;
+       u8 fold = 0;
 
-       while (n) {
-               bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
-               r = cmp_hidden_bss(&res->pub, &bss->pub);
+       ies = rcu_access_pointer(new->pub.beacon_ies);
+       if (WARN_ON(!ies))
+               return false;
 
-               if (r == 0)
-                       return bss;
-               else if (r < 0)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
+       ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+       if (!ie) {
+               /* nothing to do */
+               return true;
        }
 
-       return NULL;
-}
+       ssidlen = ie[1];
+       for (i = 0; i < ssidlen; i++)
+               fold |= ie[2 + i];
 
-static void
-copy_hidden_ies(struct cfg80211_internal_bss *res,
-               struct cfg80211_internal_bss *hidden)
-{
-       const struct cfg80211_bss_ies *ies;
+       if (fold) {
+               /* not a hidden SSID */
+               return true;
+       }
 
-       if (rcu_access_pointer(res->pub.beacon_ies))
-               return;
+       /* This is the bad part ... */
 
-       ies = rcu_access_pointer(hidden->pub.beacon_ies);
-       if (WARN_ON(!ies))
-               return;
+       list_for_each_entry(bss, &dev->bss_list, list) {
+               if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
+                       continue;
+               if (bss->pub.channel != new->pub.channel)
+                       continue;
+               if (rcu_access_pointer(bss->pub.beacon_ies))
+                       continue;
+               ies = rcu_access_pointer(bss->pub.ies);
+               if (!ies)
+                       continue;
+               ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+               if (!ie)
+                       continue;
+               if (ssidlen && ie[1] != ssidlen)
+                       continue;
+               /* that would be odd ... */
+               if (bss->pub.beacon_ies)
+                       continue;
+               if (WARN_ON_ONCE(bss->pub.hidden_beacon_bss))
+                       continue;
+               if (WARN_ON_ONCE(!list_empty(&bss->hidden_list)))
+                       list_del(&bss->hidden_list);
+               /* combine them */
+               list_add(&bss->hidden_list, &new->hidden_list);
+               bss->pub.hidden_beacon_bss = &new->pub;
+               new->refcount += bss->refcount;
+               rcu_assign_pointer(bss->pub.beacon_ies,
+                                  new->pub.beacon_ies);
+       }
 
-       ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC);
-       if (unlikely(!ies))
-               return;
-       rcu_assign_pointer(res->pub.beacon_ies, ies);
+       return true;
 }
 
 static struct cfg80211_internal_bss *
@@ -687,11 +695,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                return NULL;
        }
 
-       found = rb_find_bss(dev, tmp);
+       found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
 
        if (found) {
                found->pub.beacon_interval = tmp->pub.beacon_interval;
-               found->pub.tsf = tmp->pub.tsf;
                found->pub.signal = tmp->pub.signal;
                found->pub.capability = tmp->pub.capability;
                found->ts = tmp->ts;
@@ -711,19 +718,45 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                kfree_rcu((struct cfg80211_bss_ies *)old,
                                          rcu_head);
                } else if (rcu_access_pointer(tmp->pub.beacon_ies)) {
-                       const struct cfg80211_bss_ies *old, *ies;
+                       const struct cfg80211_bss_ies *old;
+                       struct cfg80211_internal_bss *bss;
+
+                       if (found->pub.hidden_beacon_bss &&
+                           !list_empty(&found->hidden_list)) {
+                               /*
+                                * The found BSS struct is one of the probe
+                                * response members of a group, but we're
+                                * receiving a beacon (beacon_ies in the tmp
+                                * bss is used). This can only mean that the
+                                * AP changed its beacon from not having an
+                                * SSID to showing it, which is confusing so
+                                * drop this information.
+                                */
+                               goto drop;
+                       }
 
                        old = rcu_access_pointer(found->pub.beacon_ies);
-                       ies = rcu_access_pointer(found->pub.ies);
 
                        rcu_assign_pointer(found->pub.beacon_ies,
                                           tmp->pub.beacon_ies);
 
                        /* Override IEs if they were from a beacon before */
-                       if (old == ies)
+                       if (old == rcu_access_pointer(found->pub.ies))
                                rcu_assign_pointer(found->pub.ies,
                                                   tmp->pub.beacon_ies);
 
+                       /* Assign beacon IEs to all sub entries */
+                       list_for_each_entry(bss, &found->hidden_list,
+                                           hidden_list) {
+                               const struct cfg80211_bss_ies *ies;
+
+                               ies = rcu_access_pointer(bss->pub.beacon_ies);
+                               WARN_ON(ies != old);
+
+                               rcu_assign_pointer(bss->pub.beacon_ies,
+                                                  tmp->pub.beacon_ies);
+                       }
+
                        if (old)
                                kfree_rcu((struct cfg80211_bss_ies *)old,
                                          rcu_head);
@@ -733,19 +766,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                struct cfg80211_internal_bss *hidden;
                struct cfg80211_bss_ies *ies;
 
-               /* First check if the beacon is a probe response from
-                * a hidden bss. If so, copy beacon ies (with nullified
-                * ssid) into the probe response bss entry (with real ssid).
-                * It is required basically for PSM implementation
-                * (probe responses do not contain tim ie) */
-
-               /* TODO: The code is not trying to update existing probe
-                * response bss entries when beacon ies are
-                * getting changed. */
-               hidden = rb_find_hidden_bss(dev, tmp);
-               if (hidden)
-                       copy_hidden_ies(tmp, hidden);
-
                /*
                 * create a copy -- the "res" variable that is passed in
                 * is allocated on the stack since it's not needed in the
@@ -760,21 +780,51 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                        ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
                        if (ies)
                                kfree_rcu(ies, rcu_head);
-                       spin_unlock_bh(&dev->bss_lock);
-                       return NULL;
+                       goto drop;
                }
                memcpy(new, tmp, sizeof(*new));
-               kref_init(&new->ref);
+               new->refcount = 1;
+               INIT_LIST_HEAD(&new->hidden_list);
+
+               if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
+                       hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN);
+                       if (!hidden)
+                               hidden = rb_find_bss(dev, tmp,
+                                                    BSS_CMP_HIDE_NUL);
+                       if (hidden) {
+                               new->pub.hidden_beacon_bss = &hidden->pub;
+                               list_add(&new->hidden_list,
+                                        &hidden->hidden_list);
+                               hidden->refcount++;
+                               rcu_assign_pointer(new->pub.beacon_ies,
+                                                  hidden->pub.beacon_ies);
+                       }
+               } else {
+                       /*
+                        * Ok so we found a beacon, and don't have an entry. If
+                        * it's a beacon with hidden SSID, we might be in for an
+                        * expensive search for any probe responses that should
+                        * be grouped with this beacon for updates ...
+                        */
+                       if (!cfg80211_combine_bsses(dev, new)) {
+                               kfree(new);
+                               goto drop;
+                       }
+               }
+
                list_add_tail(&new->list, &dev->bss_list);
                rb_insert_bss(dev, new);
                found = new;
        }
 
        dev->bss_generation++;
+       bss_ref_get(dev, found);
        spin_unlock_bh(&dev->bss_lock);
 
-       kref_get(&found->ref);
        return found;
+ drop:
+       spin_unlock_bh(&dev->bss_lock);
+       return NULL;
 }
 
 static struct ieee80211_channel *
@@ -833,7 +883,6 @@ cfg80211_inform_bss(struct wiphy *wiphy,
        memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
        tmp.pub.channel = channel;
        tmp.pub.signal = signal;
-       tmp.pub.tsf = tsf;
        tmp.pub.beacon_interval = beacon_interval;
        tmp.pub.capability = capability;
        /*
@@ -841,16 +890,14 @@ cfg80211_inform_bss(struct wiphy *wiphy,
         * Response frame, we need to pick one of the options and only use it
         * with the driver that does not provide the full Beacon/Probe Response
         * frame. Use Beacon frame pointer to avoid indicating that this should
-        * override the iies pointer should we have received an earlier
+        * override the IEs pointer should we have received an earlier
         * indication of Probe Response data.
-        *
-        * The initial buffer for the IEs is allocated with the BSS entry and
-        * is located after the private area.
         */
        ies = kmalloc(sizeof(*ies) + ielen, gfp);
        if (!ies)
                return NULL;
        ies->len = ielen;
+       ies->tsf = tsf;
        memcpy(ies->data, ie, ielen);
 
        rcu_assign_pointer(tmp.pub.beacon_ies, ies);
@@ -907,6 +954,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
        if (!ies)
                return NULL;
        ies->len = ielen;
+       ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
        memcpy(ies->data, mgmt->u.probe_resp.variable, ielen);
 
        if (ieee80211_is_probe_resp(mgmt->frame_control))
@@ -918,7 +966,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
        memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
        tmp.pub.channel = channel;
        tmp.pub.signal = signal;
-       tmp.pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
        tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
        tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 
@@ -935,27 +982,35 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_inform_bss_frame);
 
-void cfg80211_ref_bss(struct cfg80211_bss *pub)
+void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
+       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (!pub)
                return;
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
-       kref_get(&bss->ref);
+
+       spin_lock_bh(&dev->bss_lock);
+       bss_ref_get(dev, bss);
+       spin_unlock_bh(&dev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_ref_bss);
 
-void cfg80211_put_bss(struct cfg80211_bss *pub)
+void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
+       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (!pub)
                return;
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
-       kref_put(&bss->ref, bss_release);
+
+       spin_lock_bh(&dev->bss_lock);
+       bss_ref_put(dev, bss);
+       spin_unlock_bh(&dev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_put_bss);
 
@@ -971,8 +1026,8 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        spin_lock_bh(&dev->bss_lock);
        if (!list_empty(&bss->list)) {
-               __cfg80211_unlink_bss(dev, bss);
-               dev->bss_generation++;
+               if (__cfg80211_unlink_bss(dev, bss))
+                       dev->bss_generation++;
        }
        spin_unlock_bh(&dev->bss_lock);
 }
@@ -1155,16 +1210,6 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
        }
 }
 
-static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
-{
-       unsigned long end = jiffies;
-
-       if (end >= start)
-               return jiffies_to_msecs(end - start);
-
-       return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
-}
-
 static char *
 ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
              struct cfg80211_internal_bss *bss, char *current_ev,
@@ -1241,15 +1286,10 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
 
        rcu_read_lock();
        ies = rcu_dereference(bss->pub.ies);
-       if (ies) {
-               rem = ies->len;
-               ie = ies->data;
-       } else {
-               rem = 0;
-               ie = NULL;
-       }
+       rem = ies->len;
+       ie = ies->data;
 
-       while (ies && rem >= 2) {
+       while (rem >= 2) {
                /* invalid data */
                if (ie[1] > rem - 2)
                        break;
@@ -1358,11 +1398,11 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                                                  &iwe, IW_EV_UINT_LEN);
        }
 
-       buf = kmalloc(30, GFP_ATOMIC);
+       buf = kmalloc(31, GFP_ATOMIC);
        if (buf) {
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVCUSTOM;
-               sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->pub.tsf));
+               sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
                iwe.u.data.length = strlen(buf);
                current_ev = iwe_stream_add_point(info, current_ev, end_buf,
                                                  &iwe, buf);
index a825dfe..f432bd3 100644 (file)
@@ -301,7 +301,7 @@ static void __cfg80211_sme_scan_done(struct net_device *dev)
 
        bss = cfg80211_get_conn_bss(wdev);
        if (bss) {
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(&rdev->wiphy, bss);
        } else {
                /* not found */
                if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)
@@ -464,7 +464,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
                wdev->current_bss = NULL;
        }
 
@@ -480,7 +480,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                kfree(wdev->connect_keys);
                wdev->connect_keys = NULL;
                wdev->ssid_len = 0;
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wdev->wiphy, bss);
                return;
        }
 
@@ -586,7 +586,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
        }
 
        cfg80211_unhold_bss(wdev->current_bss);
-       cfg80211_put_bss(&wdev->current_bss->pub);
+       cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
        wdev->current_bss = NULL;
 
        cfg80211_hold_bss(bss_from_pub(bss));
@@ -621,7 +621,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
 
        return;
 out:
-       cfg80211_put_bss(bss);
+       cfg80211_put_bss(wdev->wiphy, bss);
 }
 
 void cfg80211_roamed(struct net_device *dev,
@@ -663,7 +663,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
 
        ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
        if (!ev) {
-               cfg80211_put_bss(bss);
+               cfg80211_put_bss(wdev->wiphy, bss);
                return;
        }
 
@@ -704,7 +704,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&wdev->current_bss->pub);
+               cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
        }
 
        wdev->current_bss = NULL;
@@ -875,7 +875,7 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
                if (bss) {
                        wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
                        err = cfg80211_conn_do_work(wdev);
-                       cfg80211_put_bss(bss);
+                       cfg80211_put_bss(wdev->wiphy, bss);
                } else {
                        /* otherwise we'll need to scan for the AP first */
                        err = cfg80211_conn_scan(wdev);
index 1f6f01e..238ee49 100644 (file)
@@ -106,9 +106,7 @@ static int wiphy_resume(struct device *dev)
        int ret = 0;
 
        /* Age scan results with time spent in suspend */
-       spin_lock_bh(&rdev->bss_lock);
        cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
-       spin_unlock_bh(&rdev->bss_lock);
 
        if (rdev->ops->resume) {
                rtnl_lock();
index 2134576..b7a5313 100644 (file)
@@ -1767,6 +1767,24 @@ DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device,
        TP_ARGS(wiphy, wdev)
 );
 
+TRACE_EVENT(rdev_set_mac_acl,
+       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+                struct cfg80211_acl_data *params),
+       TP_ARGS(wiphy, netdev, params),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               NETDEV_ENTRY
+               __field(u32, acl_policy)
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
+               __entry->acl_policy = params->acl_policy;
+       ),
+       TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
+                 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy)
+);
+
 /*************************************************************
  *          cfg80211 exported functions traces              *
  *************************************************************/
@@ -2033,6 +2051,21 @@ TRACE_EVENT(cfg80211_reg_can_beacon,
                  WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
 );
 
+TRACE_EVENT(cfg80211_chandef_dfs_required,
+       TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
+       TP_ARGS(wiphy, chandef),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               CHAN_DEF_ENTRY
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               CHAN_DEF_ASSIGN(chandef);
+       ),
+       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
 TRACE_EVENT(cfg80211_ch_switch_notify,
        TP_PROTO(struct net_device *netdev,
                 struct cfg80211_chan_def *chandef),
@@ -2049,6 +2082,36 @@ TRACE_EVENT(cfg80211_ch_switch_notify,
                  NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
 );
 
+TRACE_EVENT(cfg80211_radar_event,
+       TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
+       TP_ARGS(wiphy, chandef),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               CHAN_DEF_ENTRY
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               CHAN_DEF_ASSIGN(chandef);
+       ),
+       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+TRACE_EVENT(cfg80211_cac_event,
+       TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt),
+       TP_ARGS(netdev, evt),
+       TP_STRUCT__entry(
+               NETDEV_ENTRY
+               __field(enum nl80211_radar_event, evt)
+       ),
+       TP_fast_assign(
+               NETDEV_ASSIGN;
+               __entry->evt = evt;
+       ),
+       TP_printk(NETDEV_PR_FMT ",  event: %d",
+                 NETDEV_PR_ARG, __entry->evt)
+);
+
 DECLARE_EVENT_CLASS(cfg80211_rx_evt,
        TP_PROTO(struct net_device *netdev, const u8 *addr),
        TP_ARGS(netdev, addr),
@@ -2315,6 +2378,41 @@ TRACE_EVENT(cfg80211_return_u32,
        TP_printk("ret: %u", __entry->ret)
 );
 
+TRACE_EVENT(cfg80211_report_wowlan_wakeup,
+       TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+                struct cfg80211_wowlan_wakeup *wakeup),
+       TP_ARGS(wiphy, wdev, wakeup),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               WDEV_ENTRY
+               __field(bool, disconnect)
+               __field(bool, magic_pkt)
+               __field(bool, gtk_rekey_failure)
+               __field(bool, eap_identity_req)
+               __field(bool, four_way_handshake)
+               __field(bool, rfkill_release)
+               __field(s32, pattern_idx)
+               __field(u32, packet_len)
+               __dynamic_array(u8, packet, wakeup->packet_present_len)
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               WDEV_ASSIGN;
+               __entry->disconnect = wakeup->disconnect;
+               __entry->magic_pkt = wakeup->magic_pkt;
+               __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure;
+               __entry->eap_identity_req = wakeup->eap_identity_req;
+               __entry->four_way_handshake = wakeup->four_way_handshake;
+               __entry->rfkill_release = wakeup->rfkill_release;
+               __entry->pattern_idx = wakeup->pattern_idx;
+               __entry->packet_len = wakeup->packet_len;
+               if (wakeup->packet && wakeup->packet_present_len)
+                       memcpy(__get_dynamic_array(packet), wakeup->packet,
+                              wakeup->packet_present_len);
+       ),
+       TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH
index 1c2795d..37a56ee 100644 (file)
@@ -1212,14 +1212,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_WDS:
-               radar_required = !!(chan->flags & IEEE80211_CHAN_RADAR);
+               radar_required = !!(chan &&
+                                   (chan->flags & IEEE80211_CHAN_RADAR));
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_DEVICE:
        case NL80211_IFTYPE_MONITOR:
                radar_required = false;
                break;
-       case NL80211_IFTYPE_P2P_DEVICE:
        case NUM_NL80211_IFTYPES:
        case NL80211_IFTYPE_UNSPECIFIED:
        default:
index 8bafa31..e98a01c 100644 (file)
@@ -143,7 +143,8 @@ static const struct file_operations wireless_seq_fops = {
 int __net_init wext_proc_init(struct net *net)
 {
        /* Create /proc/net/wireless entry */
-       if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops))
+       if (!proc_create("wireless", S_IRUGO, net->proc_net,
+                        &wireless_seq_fops))
                return -ENOMEM;
 
        return 0;
@@ -151,5 +152,5 @@ int __net_init wext_proc_init(struct net *net)
 
 void __net_exit wext_proc_exit(struct net *net)
 {
-       proc_net_remove(net, "wireless");
+       remove_proc_entry("wireless", net->proc_net);
 }
index f9a5495..6fb9d00 100644 (file)
@@ -35,6 +35,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8,
                .sadb_alg_ivlen = 8,
@@ -51,6 +53,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12,
                .sadb_alg_ivlen = 8,
@@ -67,6 +71,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16,
                .sadb_alg_ivlen = 8,
@@ -83,6 +89,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8,
                .sadb_alg_ivlen = 8,
@@ -99,6 +107,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12,
                .sadb_alg_ivlen = 8,
@@ -115,6 +125,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16,
                .sadb_alg_ivlen = 8,
@@ -131,6 +143,8 @@ static struct xfrm_algo_desc aead_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
                .sadb_alg_ivlen = 8,
@@ -151,6 +165,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_AALG_NULL,
                .sadb_alg_ivlen = 0,
@@ -169,6 +185,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_AALG_MD5HMAC,
                .sadb_alg_ivlen = 0,
@@ -187,6 +205,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_AALG_SHA1HMAC,
                .sadb_alg_ivlen = 0,
@@ -205,6 +225,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
                .sadb_alg_ivlen = 0,
@@ -222,6 +244,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC,
                .sadb_alg_ivlen = 0,
@@ -239,6 +263,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC,
                .sadb_alg_ivlen = 0,
@@ -257,6 +283,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
                .sadb_alg_ivlen = 0,
@@ -274,6 +302,8 @@ static struct xfrm_algo_desc aalg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
                .sadb_alg_ivlen = 0,
@@ -295,6 +325,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id =  SADB_EALG_NULL,
                .sadb_alg_ivlen = 0,
@@ -313,6 +345,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_EALG_DESCBC,
                .sadb_alg_ivlen = 8,
@@ -331,6 +365,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_EALG_3DESCBC,
                .sadb_alg_ivlen = 8,
@@ -349,6 +385,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_CASTCBC,
                .sadb_alg_ivlen = 8,
@@ -367,6 +405,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
                .sadb_alg_ivlen = 8,
@@ -385,6 +425,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AESCBC,
                .sadb_alg_ivlen = 8,
@@ -403,6 +445,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
                .sadb_alg_ivlen = 8,
@@ -421,6 +465,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
                .sadb_alg_ivlen = 8,
@@ -439,6 +485,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
                .sadb_alg_ivlen = 8,
@@ -456,6 +504,8 @@ static struct xfrm_algo_desc ealg_list[] = {
                }
        },
 
+       .pfkey_supported = 1,
+
        .desc = {
                .sadb_alg_id = SADB_X_EALG_AESCTR,
                .sadb_alg_ivlen = 8,
@@ -473,6 +523,7 @@ static struct xfrm_algo_desc calg_list[] = {
                        .threshold = 90,
                }
        },
+       .pfkey_supported = 1,
        .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
 },
 {
@@ -482,6 +533,7 @@ static struct xfrm_algo_desc calg_list[] = {
                        .threshold = 90,
                }
        },
+       .pfkey_supported = 1,
        .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
 },
 {
@@ -491,6 +543,7 @@ static struct xfrm_algo_desc calg_list[] = {
                        .threshold = 50,
                }
        },
+       .pfkey_supported = 1,
        .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
 },
 };
@@ -714,27 +767,27 @@ void xfrm_probe_algs(void)
 }
 EXPORT_SYMBOL_GPL(xfrm_probe_algs);
 
-int xfrm_count_auth_supported(void)
+int xfrm_count_pfkey_auth_supported(void)
 {
        int i, n;
 
        for (i = 0, n = 0; i < aalg_entries(); i++)
-               if (aalg_list[i].available)
+               if (aalg_list[i].available && aalg_list[i].pfkey_supported)
                        n++;
        return n;
 }
-EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
+EXPORT_SYMBOL_GPL(xfrm_count_pfkey_auth_supported);
 
-int xfrm_count_enc_supported(void)
+int xfrm_count_pfkey_enc_supported(void)
 {
        int i, n;
 
        for (i = 0, n = 0; i < ealg_entries(); i++)
-               if (ealg_list[i].available)
+               if (ealg_list[i].available && ealg_list[i].pfkey_supported)
                        n++;
        return n;
 }
-EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
+EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported);
 
 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
 
index 3670526..bcfda89 100644 (file)
@@ -64,7 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
-                       goto error_nolock;
+                       goto error;
                }
 
                err = xfrm_state_check_expire(x);
index 6c9aa64..5b47180 100644 (file)
 
 #include "xfrm_hash.h"
 
+#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
+#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
+#define XFRM_MAX_QUEUE_LEN     100
+
 DEFINE_MUTEX(xfrm_cfg_mutex);
 EXPORT_SYMBOL(xfrm_cfg_mutex);
 
@@ -51,7 +55,7 @@ static struct kmem_cache *xfrm_dst_cache __read_mostly;
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
-
+static void xfrm_policy_queue_process(unsigned long arg);
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir);
@@ -287,8 +291,11 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
                INIT_HLIST_NODE(&policy->byidx);
                rwlock_init(&policy->lock);
                atomic_set(&policy->refcnt, 1);
+               skb_queue_head_init(&policy->polq.hold_queue);
                setup_timer(&policy->timer, xfrm_policy_timer,
                                (unsigned long)policy);
+               setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
+                           (unsigned long)policy);
                policy->flo.ops = &xfrm_policy_fc_ops;
        }
        return policy;
@@ -309,6 +316,16 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
 
+static void xfrm_queue_purge(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(list)) != NULL) {
+               dev_put(skb->dev);
+               kfree_skb(skb);
+       }
+}
+
 /* Rule must be locked. Release descentant resources, announce
  * entry dead. The rule must be unlinked from lists to the moment.
  */
@@ -319,6 +336,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 
        atomic_inc(&policy->genid);
 
+       del_timer(&policy->polq.hold_timer);
+       xfrm_queue_purge(&policy->polq.hold_queue);
+
        if (del_timer(&policy->timer))
                xfrm_pol_put(policy);
 
@@ -562,6 +582,46 @@ static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s
        return 0;
 }
 
+static void xfrm_policy_requeue(struct xfrm_policy *old,
+                               struct xfrm_policy *new)
+{
+       struct xfrm_policy_queue *pq = &old->polq;
+       struct sk_buff_head list;
+
+       __skb_queue_head_init(&list);
+
+       spin_lock_bh(&pq->hold_queue.lock);
+       skb_queue_splice_init(&pq->hold_queue, &list);
+       del_timer(&pq->hold_timer);
+       spin_unlock_bh(&pq->hold_queue.lock);
+
+       if (skb_queue_empty(&list))
+               return;
+
+       pq = &new->polq;
+
+       spin_lock_bh(&pq->hold_queue.lock);
+       skb_queue_splice(&list, &pq->hold_queue);
+       pq->timeout = XFRM_QUEUE_TMO_MIN;
+       mod_timer(&pq->hold_timer, jiffies);
+       spin_unlock_bh(&pq->hold_queue.lock);
+}
+
+static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
+                                  struct xfrm_policy *pol)
+{
+       u32 mark = policy->mark.v & policy->mark.m;
+
+       if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
+               return true;
+
+       if ((mark & pol->mark.m) == pol->mark.v &&
+           policy->priority == pol->priority)
+               return true;
+
+       return false;
+}
+
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 {
        struct net *net = xp_net(policy);
@@ -569,7 +629,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        struct xfrm_policy *delpol;
        struct hlist_head *chain;
        struct hlist_node *entry, *newpos;
-       u32 mark = policy->mark.v & policy->mark.m;
 
        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -578,7 +637,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (pol->type == policy->type &&
                    !selector_cmp(&pol->selector, &policy->selector) &&
-                   (mark & pol->mark.m) == pol->mark.v &&
+                   xfrm_policy_mark_match(policy, pol) &&
                    xfrm_sec_ctx_match(pol->security, policy->security) &&
                    !WARN_ON(delpol)) {
                        if (excl) {
@@ -603,8 +662,10 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        net->xfrm.policy_count[dir]++;
        atomic_inc(&flow_cache_genid);
        rt_genid_bump(net);
-       if (delpol)
+       if (delpol) {
+               xfrm_policy_requeue(delpol, policy);
                __xfrm_policy_unlink(delpol, dir);
+       }
        policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
        hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
        policy->curlft.add_time = get_seconds();
@@ -1115,11 +1176,15 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
                pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
                __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
        }
-       if (old_pol)
+       if (old_pol) {
+               if (pol)
+                       xfrm_policy_requeue(old_pol, pol);
+
                /* Unlinking succeeds always. This is the only function
                 * allowed to delete or replace socket policy.
                 */
                __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
+       }
        write_unlock_bh(&xfrm_policy_lock);
 
        if (old_pol) {
@@ -1310,6 +1375,8 @@ static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *f
                 * It means we need to try again resolving. */
                if (xdst->num_xfrms > 0)
                        return NULL;
+       } else if (dst->flags & DST_XFRM_QUEUE) {
+               return NULL;
        } else {
                /* Real bundle */
                if (stale_bundle(dst))
@@ -1673,6 +1740,171 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
        return xdst;
 }
 
+static void xfrm_policy_queue_process(unsigned long arg)
+{
+       int err = 0;
+       struct sk_buff *skb;
+       struct sock *sk;
+       struct dst_entry *dst;
+       struct net_device *dev;
+       struct xfrm_policy *pol = (struct xfrm_policy *)arg;
+       struct xfrm_policy_queue *pq = &pol->polq;
+       struct flowi fl;
+       struct sk_buff_head list;
+
+       spin_lock(&pq->hold_queue.lock);
+       skb = skb_peek(&pq->hold_queue);
+       dst = skb_dst(skb);
+       sk = skb->sk;
+       xfrm_decode_session(skb, &fl, dst->ops->family);
+       spin_unlock(&pq->hold_queue.lock);
+
+       dst_hold(dst->path);
+       dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
+                         sk, 0);
+       if (IS_ERR(dst))
+               goto purge_queue;
+
+       if (dst->flags & DST_XFRM_QUEUE) {
+               dst_release(dst);
+
+               if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
+                       goto purge_queue;
+
+               pq->timeout = pq->timeout << 1;
+               mod_timer(&pq->hold_timer, jiffies + pq->timeout);
+               return;
+       }
+
+       dst_release(dst);
+
+       __skb_queue_head_init(&list);
+
+       spin_lock(&pq->hold_queue.lock);
+       pq->timeout = 0;
+       skb_queue_splice_init(&pq->hold_queue, &list);
+       spin_unlock(&pq->hold_queue.lock);
+
+       while (!skb_queue_empty(&list)) {
+               skb = __skb_dequeue(&list);
+
+               xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
+               dst_hold(skb_dst(skb)->path);
+               dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
+                                 &fl, skb->sk, 0);
+               if (IS_ERR(dst)) {
+                       dev_put(skb->dev);
+                       kfree_skb(skb);
+                       continue;
+               }
+
+               nf_reset(skb);
+               skb_dst_drop(skb);
+               skb_dst_set(skb, dst);
+
+               dev = skb->dev;
+               err = dst_output(skb);
+               dev_put(dev);
+       }
+
+       return;
+
+purge_queue:
+       pq->timeout = 0;
+       xfrm_queue_purge(&pq->hold_queue);
+}
+
+static int xdst_queue_output(struct sk_buff *skb)
+{
+       unsigned long sched_next;
+       struct dst_entry *dst = skb_dst(skb);
+       struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+       struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
+
+       if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
+               kfree_skb(skb);
+               return -EAGAIN;
+       }
+
+       skb_dst_force(skb);
+       dev_hold(skb->dev);
+
+       spin_lock_bh(&pq->hold_queue.lock);
+
+       if (!pq->timeout)
+               pq->timeout = XFRM_QUEUE_TMO_MIN;
+
+       sched_next = jiffies + pq->timeout;
+
+       if (del_timer(&pq->hold_timer)) {
+               if (time_before(pq->hold_timer.expires, sched_next))
+                       sched_next = pq->hold_timer.expires;
+       }
+
+       __skb_queue_tail(&pq->hold_queue, skb);
+       mod_timer(&pq->hold_timer, sched_next);
+
+       spin_unlock_bh(&pq->hold_queue.lock);
+
+       return 0;
+}
+
+static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+                                                struct dst_entry *dst,
+                                                const struct flowi *fl,
+                                                int num_xfrms,
+                                                u16 family)
+{
+       int err;
+       struct net_device *dev;
+       struct dst_entry *dst1;
+       struct xfrm_dst *xdst;
+
+       xdst = xfrm_alloc_dst(net, family);
+       if (IS_ERR(xdst))
+               return xdst;
+
+       if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
+           (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
+               return xdst;
+
+       dst1 = &xdst->u.dst;
+       dst_hold(dst);
+       xdst->route = dst;
+
+       dst_copy_metrics(dst1, dst);
+
+       dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
+       dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
+       dst1->lastuse = jiffies;
+
+       dst1->input = dst_discard;
+       dst1->output = xdst_queue_output;
+
+       dst_hold(dst);
+       dst1->child = dst;
+       dst1->path = dst;
+
+       xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
+
+       err = -ENODEV;
+       dev = dst->dev;
+       if (!dev)
+               goto free_dst;
+
+       err = xfrm_fill_dst(xdst, dev, fl);
+       if (err)
+               goto free_dst;
+
+out:
+       return xdst;
+
+free_dst:
+       dst_release(dst1);
+       xdst = ERR_PTR(err);
+       goto out;
+}
+
 static struct flow_cache_object *
 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
@@ -1751,7 +1983,7 @@ make_dummy_bundle:
        /* We found policies, but there's no bundles to instantiate:
         * either because the policy blocks, has no transformations or
         * we could not build template (no xfrm_states).*/
-       xdst = xfrm_alloc_dst(net, family);
+       xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
        if (IS_ERR(xdst)) {
                xfrm_pols_put(pols, num_pols);
                return ERR_CAST(xdst);
@@ -2359,6 +2591,9 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
            (dst->dev && !netif_running(dst->dev)))
                return 0;
 
+       if (dst->flags & DST_XFRM_QUEUE)
+               return 1;
+
        last = NULL;
 
        do {
index 6039038..c721b0d 100644 (file)
@@ -74,13 +74,13 @@ static const struct file_operations xfrm_statistics_seq_fops = {
 
 int __net_init xfrm_proc_init(struct net *net)
 {
-       if (!proc_net_fops_create(net, "xfrm_stat", S_IRUGO,
-                                 &xfrm_statistics_seq_fops))
+       if (!proc_create("xfrm_stat", S_IRUGO, net->proc_net,
+                        &xfrm_statistics_seq_fops))
                return -ENOMEM;
        return 0;
 }
 
 void xfrm_proc_fini(struct net *net)
 {
-       proc_net_remove(net, "xfrm_stat");
+       remove_proc_entry("xfrm_stat", net->proc_net);
 }
index bbbd276..7203e66 100644 (file)
@@ -19,6 +19,7 @@ bpf-direct-objs := bpf-direct.o
 
 # Try to match the kernel target.
 ifndef CONFIG_64BIT
+ifndef CROSS_COMPILE
 
 # s390 has -m31 flag to build 31 bit binaries
 ifndef CONFIG_S390
@@ -35,6 +36,7 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG)
 HOSTLOADLIBES_bpf-fancy += $(MFLAG)
 HOSTLOADLIBES_dropper += $(MFLAG)
 endif
+endif
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
index 4d2c7df..2bb08a9 100755 (executable)
@@ -230,12 +230,12 @@ our $Inline       = qr{inline|__always_inline|noinline};
 our $Member    = qr{->$Ident|\.$Ident|\[[^]]*\]};
 our $Lval      = qr{$Ident(?:$Member)*};
 
-our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)};
-our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))};
-our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)};
+our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
+our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
+our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
 our $Float     = qr{$Float_hex|$Float_dec|$Float_int};
-our $Constant  = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))};
-our $Assignment        = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
+our $Constant  = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*};
+our $Assignment        = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};
 our $Compare    = qr{<=|>=|==|!=|<|>};
 our $Operators = qr{
                        <=|>=|==|!=|
index 0b6aeba..c78286f 100644 (file)
@@ -656,29 +656,43 @@ static char *driver_short_names[] = {
 #define get_azx_dev(substream) (substream->runtime->private_data)
 
 #ifdef CONFIG_X86
-static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on)
+static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
 {
+       int pages;
+
        if (azx_snoop(chip))
                return;
-       if (addr && size) {
-               int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (!dmab || !dmab->area || !dmab->bytes)
+               return;
+
+#ifdef CONFIG_SND_DMA_SGBUF
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
+               struct snd_sg_buf *sgbuf = dmab->private_data;
                if (on)
-                       set_memory_wc((unsigned long)addr, pages);
+                       set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
                else
-                       set_memory_wb((unsigned long)addr, pages);
+                       set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
+               return;
        }
+#endif
+
+       pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (on)
+               set_memory_wc((unsigned long)dmab->area, pages);
+       else
+               set_memory_wb((unsigned long)dmab->area, pages);
 }
 
 static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
                                 bool on)
 {
-       __mark_pages_wc(chip, buf->area, buf->bytes, on);
+       __mark_pages_wc(chip, buf, on);
 }
 static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
-                                  struct snd_pcm_runtime *runtime, bool on)
+                                  struct snd_pcm_substream *substream, bool on)
 {
        if (azx_dev->wc_marked != on) {
-               __mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on);
+               __mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on);
                azx_dev->wc_marked = on;
        }
 }
@@ -689,7 +703,7 @@ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
 {
 }
 static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
-                                  struct snd_pcm_runtime *runtime, bool on)
+                                  struct snd_pcm_substream *substream, bool on)
 {
 }
 #endif
@@ -1968,11 +1982,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
 {
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        int ret;
 
-       mark_runtime_wc(chip, azx_dev, runtime, false);
+       mark_runtime_wc(chip, azx_dev, substream, false);
        azx_dev->bufsize = 0;
        azx_dev->period_bytes = 0;
        azx_dev->format_val = 0;
@@ -1980,7 +1993,7 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
                                        params_buffer_bytes(hw_params));
        if (ret < 0)
                return ret;
-       mark_runtime_wc(chip, azx_dev, runtime, true);
+       mark_runtime_wc(chip, azx_dev, substream, true);
        return ret;
 }
 
@@ -1989,7 +2002,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx_dev *azx_dev = get_azx_dev(substream);
        struct azx *chip = apcm->chip;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
 
        /* reset BDL address */
@@ -2002,7 +2014,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
 
        snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
 
-       mark_runtime_wc(chip, azx_dev, runtime, false);
+       mark_runtime_wc(chip, azx_dev, substream, false);
        return snd_pcm_lib_free_pages(substream);
 }
 
@@ -3613,13 +3625,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* 5 Series/3400 */
        { PCI_DEVICE(0x8086, 0x3b56),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
-       /* SCH */
+       /* Poulsbo */
        { PCI_DEVICE(0x8086, 0x811b),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+       /* Oaktrail */
        { PCI_DEVICE(0x8086, 0x080a),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* ICH */
        { PCI_DEVICE(0x8086, 0x2668),
          .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
index cf38861..5faaad2 100644 (file)
@@ -4694,6 +4694,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB),
        SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
        SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+       SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST),
        SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
        SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
        SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
@@ -5708,6 +5709,7 @@ static const struct alc_model_fixup alc268_fixup_models[] = {
 };
 
 static const struct snd_pci_quirk alc268_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
        /* below is codec SSID since multiple Toshiba laptops have the
         * same PCI SSID 1179:ff00
         */
index 1d8bb59..ef62c43 100644 (file)
@@ -685,7 +685,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        }
        sr_val = i;
 
-       lrclk = snd_soc_params_to_bclk(params) / params_rate(params);
+       lrclk = rates[bclk] / params_rate(params);
 
        arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",
                        rates[bclk], rates[bclk] / lrclk);
@@ -1082,6 +1082,9 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
                        id, ret);
        }
 
+       regmap_update_bits(arizona->regmap, fll->base + 1,
+                          ARIZONA_FLL1_FREERUN, 0);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(arizona_init_fll);
index e6cefe1..d8c65f5 100644 (file)
@@ -1019,8 +1019,6 @@ static const char *wm2200_mixer_texts[] = {
        "EQR",
        "LHPF1",
        "LHPF2",
-       "LHPF3",
-       "LHPF4",
        "DSP1.1",
        "DSP1.2",
        "DSP1.3",
@@ -1053,7 +1051,6 @@ static int wm2200_mixer_values[] = {
        0x25,
        0x50,   /* EQ */
        0x51,
-       0x52,
        0x60,   /* LHPF1 */
        0x61,   /* LHPF2 */
        0x68,   /* DSP1 */
index 7a9048d..1440b3f 100644 (file)
@@ -896,8 +896,7 @@ static const unsigned int wm5102_aec_loopback_values[] = {
 
 static const struct soc_enum wm5102_aec_loopback =
        SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
-                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT,
-                             ARIZONA_AEC_LOOPBACK_SRC_MASK,
+                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
                              ARRAY_SIZE(wm5102_aec_loopback_texts),
                              wm5102_aec_loopback_texts,
                              wm5102_aec_loopback_values);
index ae80c8c..7a09096 100644 (file)
@@ -344,8 +344,7 @@ static const unsigned int wm5110_aec_loopback_values[] = {
 
 static const struct soc_enum wm5110_aec_loopback =
        SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
-                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT,
-                             ARIZONA_AEC_LOOPBACK_SRC_MASK,
+                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
                              ARRAY_SIZE(wm5110_aec_loopback_texts),
                              wm5110_aec_loopback_texts,
                              wm5110_aec_loopback_values);
index 7b198c3..b6b6548 100644 (file)
@@ -324,7 +324,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 
                if (reg) {
                        buf = kmemdup(region->data, le32_to_cpu(region->len),
-                                     GFP_KERNEL);
+                                     GFP_KERNEL | GFP_DMA);
                        if (!buf) {
                                adsp_err(dsp, "Out of memory\n");
                                return -ENOMEM;
@@ -396,7 +396,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
        hdr = (void*)&firmware->data[0];
        if (memcmp(hdr->magic, "WMDR", 4) != 0) {
                adsp_err(dsp, "%s: invalid magic\n", file);
-               return -EINVAL;
+               goto out_fw;
        }
 
        adsp_dbg(dsp, "%s: v%d.%d.%d\n", file,
@@ -439,7 +439,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 
                if (reg) {
                        buf = kmemdup(blk->data, le32_to_cpu(blk->len),
-                                     GFP_KERNEL);
+                                     GFP_KERNEL | GFP_DMA);
                        if (!buf) {
                                adsp_err(dsp, "Out of memory\n");
                                return -ENOMEM;
index bf363d8..500f8ce 100644 (file)
@@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = {
        .pcm_free       = imx_pcm_free,
 };
 
-static int imx_soc_platform_probe(struct platform_device *pdev)
+int imx_pcm_dma_init(struct platform_device *pdev)
 {
        return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);
 }
-
-static int imx_soc_platform_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_platform(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver imx_pcm_driver = {
-       .driver = {
-                       .name = "imx-pcm-audio",
-                       .owner = THIS_MODULE,
-       },
-       .probe = imx_soc_platform_probe,
-       .remove = imx_soc_platform_remove,
-};
-
-module_platform_driver(imx_pcm_driver);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-pcm-audio");
index 5ec362a..920f945 100644 (file)
@@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {
        .pcm_free       = imx_pcm_fiq_free,
 };
 
-static int imx_soc_platform_probe(struct platform_device *pdev)
+int imx_pcm_fiq_init(struct platform_device *pdev)
 {
        struct imx_ssi *ssi = platform_get_drvdata(pdev);
        int ret;
@@ -314,23 +314,3 @@ failed_register:
 
        return ret;
 }
-
-static int imx_soc_platform_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_platform(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver imx_pcm_driver = {
-       .driver = {
-                       .name = "imx-fiq-pcm-audio",
-                       .owner = THIS_MODULE,
-       },
-
-       .probe = imx_soc_platform_probe,
-       .remove = imx_soc_platform_remove,
-};
-
-module_platform_driver(imx_pcm_driver);
-
-MODULE_LICENSE("GPL");
index d5cd9ef..0d0625b 100644 (file)
@@ -104,6 +104,38 @@ void imx_pcm_free(struct snd_pcm *pcm)
 }
 EXPORT_SYMBOL_GPL(imx_pcm_free);
 
+static int imx_pcm_probe(struct platform_device *pdev)
+{
+       if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0)
+               return imx_pcm_fiq_init(pdev);
+
+       return imx_pcm_dma_init(pdev);
+}
+
+static int imx_pcm_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_platform(&pdev->dev);
+       return 0;
+}
+
+static struct platform_device_id imx_pcm_devtype[] = {
+       { .name = "imx-pcm-audio", },
+       { .name = "imx-fiq-pcm-audio", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, imx_pcm_devtype);
+
+static struct platform_driver imx_pcm_driver = {
+       .driver = {
+                       .name = "imx-pcm",
+                       .owner = THIS_MODULE,
+       },
+       .id_table = imx_pcm_devtype,
+       .probe = imx_pcm_probe,
+       .remove = imx_pcm_remove,
+};
+module_platform_driver(imx_pcm_driver);
+
 MODULE_DESCRIPTION("Freescale i.MX PCM driver");
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_LICENSE("GPL");
index 83c0ed7..5ae13a1 100644 (file)
@@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
 int imx_pcm_new(struct snd_soc_pcm_runtime *rtd);
 void imx_pcm_free(struct snd_pcm *pcm);
 
+#ifdef CONFIG_SND_SOC_IMX_PCM_DMA
+int imx_pcm_dma_init(struct platform_device *pdev);
+#else
+static inline int imx_pcm_dma_init(struct platform_device *pdev)
+{
+       return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
+int imx_pcm_fiq_init(struct platform_device *pdev);
+#else
+static inline int imx_pcm_fiq_init(struct platform_device *pdev)
+{
+       return -ENODEV;
+}
+#endif
+
 #endif /* _IMX_PCM_H */
index 1e36bc8..258acad 100644 (file)
@@ -1023,7 +1023,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
 
        if (SND_SOC_DAPM_EVENT_ON(event)) {
                if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
-                       ret = regulator_allow_bypass(w->regulator, true);
+                       ret = regulator_allow_bypass(w->regulator, false);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
                                         "ASoC: Failed to bypass %s: %d\n",
@@ -1033,7 +1033,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
                return regulator_enable(w->regulator);
        } else {
                if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
-                       ret = regulator_allow_bypass(w->regulator, false);
+                       ret = regulator_allow_bypass(w->regulator, true);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
                                         "ASoC: Failed to unbypass %s: %d\n",
@@ -3039,6 +3039,14 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
                                w->name, ret);
                        return NULL;
                }
+
+               if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+                       ret = regulator_allow_bypass(w->regulator, true);
+                       if (ret != 0)
+                               dev_warn(w->dapm->dev,
+                                        "ASoC: Failed to unbypass %s: %d\n",
+                                        w->name, ret);
+               }
                break;
        case snd_soc_dapm_clock_supply:
 #ifdef CONFIG_CLKDEV_LOOKUP
index ed4d89c..e90daf8 100644 (file)
@@ -1331,16 +1331,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
                }
                channels = (hdr->bLength - 7) / csize - 1;
                bmaControls = hdr->bmaControls;
+               if (hdr->bLength < 7 + csize) {
+                       snd_printk(KERN_ERR "usbaudio: unit %u: "
+                                  "invalid UAC_FEATURE_UNIT descriptor\n",
+                                  unitid);
+                       return -EINVAL;
+               }
        } else {
                struct uac2_feature_unit_descriptor *ftr = _ftr;
                csize = 4;
                channels = (hdr->bLength - 6) / 4 - 1;
                bmaControls = ftr->bmaControls;
-       }
-
-       if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) {
-               snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid);
-               return -EINVAL;
+               if (hdr->bLength < 6 + csize) {
+                       snd_printk(KERN_ERR "usbaudio: unit %u: "
+                                  "invalid UAC_FEATURE_UNIT descriptor\n",
+                                  unitid);
+                       return -EINVAL;
+               }
        }
 
        /* parse the source unit */
diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore
new file mode 100644 (file)
index 0000000..44f095f
--- /dev/null
@@ -0,0 +1,2 @@
+slabinfo
+page-types