Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author	David S. Miller <davem@davemloft.net>
Fri, 31 Aug 2012 19:14:10 +0000 (15:14 -0400)
committer	David S. Miller <davem@davemloft.net>
Fri, 31 Aug 2012 19:14:18 +0000 (15:14 -0400)
Merge the 'net' tree to get the recent set of netfilter bug fixes and
to ease the merge hassles Pablo is going to have to deal with for
upcoming changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
448 files changed:
Documentation/devicetree/bindings/net/cpsw.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/davinci-mdio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt [new file with mode: 0644]
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
drivers/Makefile
drivers/bcma/Kconfig
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon_nflash.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/host_pci.c
drivers/bcma/host_soc.c
drivers/bcma/main.c
drivers/bluetooth/bcm203x.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bpa10x.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btsdio.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/bluetooth/dtl1_cs.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_main.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/fsl_pq_mdio.h [deleted file]
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/xgmac_mdio.c [new file with mode: 0644]
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon_boards.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig [moved from drivers/ieee802154/Kconfig with 100% similarity]
drivers/net/ieee802154/Makefile [moved from drivers/ieee802154/Makefile with 100% similarity]
drivers/net/ieee802154/at86rf230.c [moved from drivers/ieee802154/at86rf230.c with 98% similarity]
drivers/net/ieee802154/fakehard.c [moved from drivers/ieee802154/fakehard.c with 99% similarity]
drivers/net/ieee802154/fakelb.c [moved from drivers/ieee802154/fakelb.c with 100% similarity]
drivers/net/loopback.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio-mux-mmioreg.c [new file with mode: 0644]
drivers/net/phy/phy.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/Kconfig
drivers/net/team/team.c
drivers/net/team/team_mode_broadcast.c
drivers/net/team/team_mode_roundrobin.c
drivers/net/tun.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/wimax/i2400m/driver.c
drivers/net/wireless/adm8211.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/rc.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/ath/carl9170/mac.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/rx.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/b43/Makefile
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_common.h
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/phy_n.h
drivers/net/wireless/b43/radio_2057.c [new file with mode: 0644]
drivers/net/wireless/b43/radio_2057.h [new file with mode: 0644]
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/include/brcmu_wifi.h
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965.h
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/Makefile
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/uap_txrx.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/eeprom.h
drivers/net/wireless/p54/lmac.h
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54pci.h
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.h
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.h
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/xen-netfront.c
drivers/ssb/driver_mipscore.c
drivers/staging/winbond/wbusb.c
firmware/Makefile
firmware/cxgb3/t3fw-7.10.0.bin.ihex [deleted file]
fs/namei.c
fs/seq_file.c
include/linux/Kbuild
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bcma/bcma_regs.h
include/linux/ethtool.h
include/linux/hash.h
include/linux/if_arp.h
include/linux/if_team.h
include/linux/if_tunnel.h
include/linux/if_vlan.h
include/linux/inet_diag.h
include/linux/inetdevice.h
include/linux/ip6_tunnel.h
include/linux/jiffies.h
include/linux/mdio.h
include/linux/netdevice.h
include/linux/netlink.h
include/linux/nl80211.h
include/linux/of_mdio.h
include/linux/packet_diag.h [new file with mode: 0644]
include/linux/rfkill.h
include/linux/seq_file.h
include/linux/skbuff.h
include/linux/snmp.h
include/linux/ssb/ssb_driver_chipcommon.h
include/linux/tipc_config.h
include/net/arp.h
include/net/ax25.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/smp.h
include/net/cfg80211.h
include/net/dst.h
include/net/ieee80211_radiotap.h
include/net/ip6_tunnel.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netlink.h
include/net/netns/ipv4.h
include/net/netns/packet.h
include/net/netns/sctp.h [new file with mode: 0644]
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/xfrm.h
init/Kconfig
kernel/pid.c
kernel/pid_namespace.c
lib/nlattr.c
net/8021q/vlan_core.c
net/appletalk/atalk_proc.c
net/ax25/ax25_uid.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c
net/batman-adv/vis.c
net/batman-adv/vis.h
net/bluetooth/a2mp.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/sock.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hidp/sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_stp_timer.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/core/dev.c
net/core/dst.c
net/core/link_watch.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/scm.c
net/core/sock.c
net/decnet/dn_route.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/inet_diag.c
net/ipv4/ipmr.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv4/udp_diag.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c [new file with mode: 0644]
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipx/ipx_proc.c
net/key/af_key.c
net/llc/llc_proc.c
net/mac80211/aes_cmac.c
net/mac80211/cfg.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.h
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rate.h
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/core.c
net/netfilter/ipvs/Kconfig
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_log.c
net/netfilter/xt_LOG.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_osf.c
net/netfilter/xt_owner.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/packet/Kconfig
net/packet/Makefile
net/packet/af_packet.c
net/packet/diag.c [new file with mode: 0644]
net/packet/internal.h [new file with mode: 0644]
net/phonet/socket.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rfkill/core.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/sch_generic.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/bind_addr.c
net/sctp/chunk.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/objcnt.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/primitive.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/ulpqueue.c
net/tipc/bearer.c
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/eth_media.c
net/tipc/handler.c
net/tipc/link.c
net/tipc/name_table.c
net/tipc/net.c
net/tipc/net.h
net/tipc/subscr.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/radiotap.c
net/wireless/util.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c

diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
new file mode 100644 (file)
index 0000000..dcaabe9
--- /dev/null
@@ -0,0 +1,109 @@
+TI SoC Ethernet Switch Controller Device Tree Bindings
+------------------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,cpsw"
+- reg                  : physical base address and size of the cpsw
+                         registers map
+- interrupts           : property with a value describing the interrupt
+                         number
+- interrupt-parent     : The parent interrupt controller
+- cpdma_channels       : Specifies number of channels in CPDMA
+- host_port_no         : Specifies host port shift
+- cpdma_reg_ofs                : Specifies CPDMA submodule register offset
+- cpdma_sram_ofs       : Specifies CPDMA SRAM offset
+- ale_reg_ofs          : Specifies ALE submodule register offset
+- ale_entries          : Specifies number of entries the ALE can hold
+- host_port_reg_ofs    : Specifies host port register offset
+- hw_stats_reg_ofs     : Specifies hardware statistics register offset
+- bd_ram_ofs           : Specifies internal descriptor RAM offset
+- bd_ram_size          : Specifies internal descriptor RAM size
+- rx_descs             : Specifies number of Rx descriptors
+- mac_control          : Specifies default MAC control register content
+                         for the specific platform
+- slaves               : Specifies number of slaves
+- slave_reg_ofs                : Specifies slave register offset
+- sliver_reg_ofs       : Specifies slave sliver register offset
+- phy_id               : Specifies slave phy id
+- mac-address          : Specifies slave MAC address
+
+Optional properties:
+- ti,hwmods            : Must be "cpgmac0"
+- no_bd_ram            : Must be 0 or 1
+
+Note: The "ti,hwmods" field is used to fetch the base address and IRQ
+resources from the TI omap hwmod database during device registration.
+The plan is to eventually migrate the hwmod database contents into the
+device tree blob so that all the required data is taken from the
+device tree dts file.
+
+Examples:
+
+       mac: ethernet@4A100000 {
+               compatible = "ti,cpsw";
+               reg = <0x4A100000 0x1000>;
+               interrupts = <55 0x4>;
+               interrupt-parent = <&intc>;
+               cpdma_channels = <8>;
+               host_port_no = <0>;
+               cpdma_reg_ofs = <0x800>;
+               cpdma_sram_ofs = <0xa00>;
+               ale_reg_ofs = <0xd00>;
+               ale_entries = <1024>;
+               host_port_reg_ofs = <0x108>;
+               hw_stats_reg_ofs = <0x900>;
+               bd_ram_ofs = <0x2000>;
+               bd_ram_size = <0x2000>;
+               no_bd_ram = <0>;
+               rx_descs = <64>;
+               mac_control = <0x20>;
+               slaves = <2>;
+               cpsw_emac0: slave@0 {
+                       slave_reg_ofs = <0x208>;
+                       sliver_reg_ofs = <0xd80>;
+                       phy_id = "davinci_mdio.16:00";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+               cpsw_emac1: slave@1 {
+                       slave_reg_ofs = <0x308>;
+                       sliver_reg_ofs = <0xdc0>;
+                       phy_id = "davinci_mdio.16:01";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+       };
+
+(or)
+       mac: ethernet@4A100000 {
+               compatible = "ti,cpsw";
+               ti,hwmods = "cpgmac0";
+               cpdma_channels = <8>;
+               host_port_no = <0>;
+               cpdma_reg_ofs = <0x800>;
+               cpdma_sram_ofs = <0xa00>;
+               ale_reg_ofs = <0xd00>;
+               ale_entries = <1024>;
+               host_port_reg_ofs = <0x108>;
+               hw_stats_reg_ofs = <0x900>;
+               bd_ram_ofs = <0x2000>;
+               bd_ram_size = <0x2000>;
+               no_bd_ram = <0>;
+               rx_descs = <64>;
+               mac_control = <0x20>;
+               slaves = <2>;
+               cpsw_emac0: slave@0 {
+                       slave_reg_ofs = <0x208>;
+                       sliver_reg_ofs = <0xd80>;
+                       phy_id = "davinci_mdio.16:00";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+               cpsw_emac1: slave@1 {
+                       slave_reg_ofs = <0x308>;
+                       sliver_reg_ofs = <0xdc0>;
+                       phy_id = "davinci_mdio.16:01";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+       };
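
The cpsw driver (drivers/net/ethernet/ti/cpsw.c, also touched in this merge)
is the consumer of these properties, presumably through the standard OF
helpers. The fragment below is only a rough sketch of that, not the driver's
actual probe code: the struct and function names are invented, error handling
is trimmed, and only a handful of the required properties appear.

        #include <linux/of.h>
        #include <linux/types.h>

        /* Hypothetical container for a few of the binding's properties. */
        struct cpsw_dt_example {
                u32 cpdma_channels;
                u32 ale_entries;
                u32 slaves;
                const char *slave0_phy_id;
        };

        /* Sketch only: the driver's real parsing covers every required
         * property plus the per-slave child nodes shown above. */
        static int cpsw_parse_dt_example(struct device_node *np,
                                         struct cpsw_dt_example *p)
        {
                struct device_node *slave;
                int ret;

                ret = of_property_read_u32(np, "cpdma_channels",
                                           &p->cpdma_channels);
                if (ret)
                        return ret;
                ret = of_property_read_u32(np, "ale_entries", &p->ale_entries);
                if (ret)
                        return ret;
                ret = of_property_read_u32(np, "slaves", &p->slaves);
                if (ret)
                        return ret;

                /* Slave sub-nodes carry slave_reg_ofs, phy_id, mac-address. */
                for_each_child_of_node(np, slave) {
                        if (!of_property_read_string(slave, "phy_id",
                                                     &p->slave0_phy_id)) {
                                of_node_put(slave);
                                break;
                        }
                }

                return 0;
        }
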
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
new file mode 100644 (file)
index 0000000..72efaaf
--- /dev/null
@@ -0,0 +1,33 @@
+TI SoC Davinci MDIO Controller Device Tree Bindings
+---------------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,davinci_mdio"
+- reg                  : physical base address and size of the davinci mdio
+                         registers map
+- bus_freq             : MDIO bus frequency
+
+Optional properties:
+- ti,hwmods            : Must be "davinci_mdio"
+
+Note: The "ti,hwmods" field is used to fetch the base address and IRQ
+resources from the TI omap hwmod database during device registration.
+The plan is to eventually migrate the hwmod database contents into the
+device tree blob so that all the required data is taken from the
+device tree dts file.
+
+Examples:
+
+       mdio: davinci_mdio@4A101000 {
+               compatible = "ti,davinci_mdio";
+               reg = <0x4A101000 0x1000>;
+               bus_freq = <1000000>;
+       };
+
+(or)
+
+       mdio: davinci_mdio@4A101000 {
+               compatible = "ti,davinci_mdio";
+               ti,hwmods = "davinci_mdio";
+               bus_freq = <1000000>;
+       };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
new file mode 100644 (file)
index 0000000..8516929
--- /dev/null
@@ -0,0 +1,75 @@
+Properties for an MDIO bus multiplexer controlled by a memory-mapped device
+
+This is a special case of an MDIO bus multiplexer.  A memory-mapped device,
+like an FPGA, is used to control which child bus is connected.  The mdio-mux
+node must be a child of the memory-mapped device.  The driver currently only
+supports devices with eight-bit registers.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : string, must contain "mdio-mux-mmioreg"
+
+- reg : integer, contains the offset of the register that controls the bus
+       multiplexer.  The size field in the 'reg' property is the size of
+       the register, and must therefore be 1.
+
+- mux-mask : integer, contains an eight-bit mask that specifies which
+       bits in the register control the actual bus multiplexer.  The
+       'reg' property of each child mdio-mux node must be constrained by
+       this mask.
+
+Example:
+
+The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
+For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
+A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
+BRDCFG1 that control the actual mux.
+
+       /* The FPGA node */
+       fpga: board-control@3,0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
+               reg = <3 0 0x30>;
+               ranges = <0 3 0 0x30>;
+
+               mdio-mux-emi2 {
+                       compatible = "mdio-mux-mmioreg", "mdio-mux";
+                       mdio-parent-bus = <&xmdio0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <9 1>; // BRDCFG1
+                       mux-mask = <0x6>; // EMI2
+
+                       emi2_slot1: mdio@0 {    // Slot 1 XAUI (FM2)
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               phy_xgmii_slot1: ethernet-phy@0 {
+                                       compatible = "ethernet-phy-ieee802.3-c45";
+                                       reg = <4>;
+                               };
+                       };
+
+                       emi2_slot2: mdio@2 {    // Slot 2 XAUI (FM1)
+                               reg = <2>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               phy_xgmii_slot2: ethernet-phy@4 {
+                                       compatible = "ethernet-phy-ieee802.3-c45";
+                                       reg = <0>;
+                               };
+                       };
+               };
+       };
+
+       /* The parent MDIO bus. */
+       xmdio0: mdio@f1000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "fsl,fman-xmdio";
+               reg = <0xf1000 0x1000>;
+               interrupts = <100 1 0 0>;
+       };
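
At run time the multiplexer selection amounts to a masked read-modify-write of
the register named by 'reg', touching only the bits covered by 'mux-mask'. The
sketch below illustrates that operation only; it is not the mdio-mux-mmioreg
driver's actual code, and the function name and error handling are invented.

        #include <linux/errno.h>
        #include <linux/io.h>
        #include <linux/types.h>

        /* Sketch: select a child MDIO bus by reprogramming only the bits of
         * an eight-bit memory-mapped register that the mask covers.  'phys'
         * is the register address derived from the 'reg' property. */
        static int mmioreg_mux_select_example(phys_addr_t phys, u8 mask, u8 child)
        {
                void __iomem *p;
                u8 cur, want;

                p = ioremap(phys, 1);
                if (!p)
                        return -ENOMEM;

                cur = ioread8(p);
                want = (cur & ~mask) | (child & mask);  /* only the mux bits */
                if (cur != want)
                        iowrite8(want, p);

                iounmap(p);
                return 0;
        }
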
index 8f3ae4a..a173d2a 100644 (file)
@@ -75,9 +75,10 @@ folder:
 
 There is a special folder for debugging information:
 
-#  ls /sys/kernel/debug/batman_adv/bat0/
-# bla_claim_table    log                socket             transtable_local
-# gateways           originators        transtable_global  vis_data
+# ls /sys/kernel/debug/batman_adv/bat0/
+# bla_backbone_table  log                 transtable_global
+# bla_claim_table     originators         transtable_local
+# gateways            socket              vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
index 6b1c711..10a015c 100644 (file)
@@ -752,12 +752,22 @@ xmit_hash_policy
                protocol information to generate the hash.
 
                Uses XOR of hardware MAC addresses and IP addresses to
-               generate the hash.  The formula is
+               generate the hash.  The IPv4 formula is
 
                (((source IP XOR dest IP) AND 0xffff) XOR
                        ( source MAC XOR destination MAC ))
                                modulo slave count
 
+               The IPv6 formula is
+
+               hash = (source ip quad 2 XOR dest IP quad 2) XOR
+                      (source ip quad 3 XOR dest IP quad 3) XOR
+                      (source ip quad 4 XOR dest IP quad 4)
+
+               (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+                       XOR (source MAC XOR destination MAC))
+                               modulo slave count
+
                This algorithm will place all traffic to a particular
                network peer on the same slave.  For non-IP traffic,
                the formula is the same as for the layer2 transmit
@@ -778,19 +788,29 @@ xmit_hash_policy
                slaves, although a single connection will not span
                multiple slaves.
 
-               The formula for unfragmented TCP and UDP packets is
+               The formula for unfragmented IPv4 TCP and UDP packets is
 
                ((source port XOR dest port) XOR
                         ((source IP XOR dest IP) AND 0xffff)
                                modulo slave count
 
-               For fragmented TCP or UDP packets and all other IP
-               protocol traffic, the source and destination port
+               The formula for unfragmented IPv6 TCP and UDP packets is
+
+               hash = (source port XOR dest port) XOR
+                      ((source ip quad 2 XOR dest IP quad 2) XOR
+                       (source ip quad 3 XOR dest IP quad 3) XOR
+                       (source ip quad 4 XOR dest IP quad 4))
+
+               ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+                       modulo slave count
+
+               For fragmented TCP or UDP packets and all other IPv4 and
+               IPv6 protocol traffic, the source and destination port
                information is omitted.  For non-IP traffic, the
                formula is the same as for the layer2 transmit hash
                policy.
 
-               This policy is intended to mimic the behavior of
+               The IPv4 policy is intended to mimic the behavior of
                certain switches, notably Cisco switches with PFC2 as
                well as some Foundry and IBM products.
 
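Both transmit hash formulas above translate directly into a few lines of C.
The sketch below merely restates the documented IPv4 cases; the bonding
driver's real implementations, including the IPv6 folding, appear in the
bond_main.c hunk later in this diff, and the function names and host-byte-order
convention here are assumptions of the sketch.

        #include <linux/types.h>

        /* layer2+3: (((source IP XOR dest IP) AND 0xffff) XOR
         *            (source MAC XOR destination MAC)) modulo slave count.
         * saddr/daddr are IPv4 addresses in host byte order, smac5/dmac5
         * the last octet of each MAC address. */
        static inline u32 l23_ipv4_hash_example(u32 saddr, u32 daddr,
                                                u8 smac5, u8 dmac5, int count)
        {
                return (((saddr ^ daddr) & 0xffff) ^ (smac5 ^ dmac5)) % count;
        }

        /* layer3+4, unfragmented TCP/UDP: ((source port XOR dest port) XOR
         * ((source IP XOR dest IP) AND 0xffff)) modulo slave count. */
        static inline u32 l34_ipv4_hash_example(u16 sport, u16 dport,
                                                u32 saddr, u32 daddr, int count)
        {
                return ((sport ^ dport) ^ ((saddr ^ daddr) & 0xffff)) % count;
        }
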
index 5b42184..1ecd1bf 100644 (file)
@@ -120,7 +120,6 @@ obj-$(CONFIG_VHOST_NET)             += vhost/
 obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
-obj-y                          += ieee802154/
 #common clk code
 obj-y                          += clk/
 
index 06b3207..a533af2 100644 (file)
@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
 
 config BCMA_SFLASH
        bool
-       depends on BCMA_DRIVER_MIPS && BROKEN
+       depends on BCMA_DRIVER_MIPS
        default y
 
 config BCMA_NFLASH
        bool
-       depends on BCMA_DRIVER_MIPS && BROKEN
+       depends on BCMA_DRIVER_MIPS
        default y
 
 config BCMA_DRIVER_GMAC_CMN
index 3cf9cc9..169fc58 100644 (file)
@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_SFLASH
 /* driver_chipcommon_sflash.c */
 int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
 #else
 static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 #ifdef CONFIG_BCMA_NFLASH
 /* driver_chipcommon_nflash.c */
 int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
 #else
 static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
index 574d624..9042781 100644 (file)
@@ -5,15 +5,37 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+struct platform_device bcma_nflash_dev = {
+       .name           = "bcma_nflash",
+       .num_resources  = 0,
+};
+
 /* Initialize NAND flash access */
 int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
-       bcma_err(cc->core->bus, "NAND flash support is broken\n");
+       struct bcma_bus *bus = cc->core->bus;
+
+       if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+           cc->core->id.rev != 0x38) {
+               bcma_err(bus, "NAND flash on unsupported board!\n");
+               return -ENOTSUPP;
+       }
+
+       if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+               bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+               return -ENODEV;
+       }
+
+       cc->nflash.present = true;
+
+       /* Prepare platform device, but don't register it yet. It's too early,
+        * malloc (required by device_private_init) is not available yet. */
+       bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
        return 0;
 }
index c9a4f46..8b8f2f3 100644 (file)
@@ -101,7 +101,7 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
        bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
 }
 
-void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
+static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
 
@@ -257,7 +257,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
 }
 
 /* query bus clock frequency for PMU-enabled chipcommon */
-u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
 
index 6e157a5..2c4eec2 100644 (file)
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+static struct resource bcma_sflash_resource = {
+       .name   = "bcma_sflash",
+       .start  = BCMA_SFLASH,
+       .end    = 0,
+       .flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+       .name           = "bcma_sflash",
+       .resource       = &bcma_sflash_resource,
+       .num_resources  = 1,
+};
+
+struct bcma_sflash_tbl_e {
+       char *name;
+       u32 id;
+       u32 blocksize;
+       u16 numblocks;
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+       { "", 0x14, 0x10000, 32, },
+       { 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+       { 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+       { 0 },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+       int i;
+       bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+                       BCMA_CC_FLASHCTL_START | opcode);
+       for (i = 0; i < 1000; i++) {
+               if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+                     BCMA_CC_FLASHCTL_BUSY))
+                       return;
+               cpu_relax();
+       }
+       bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
 /* Initialize serial flash access */
 int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
-       bcma_err(cc->core->bus, "Serial flash support is broken\n");
+       struct bcma_bus *bus = cc->core->bus;
+       struct bcma_sflash *sflash = &cc->sflash;
+       struct bcma_sflash_tbl_e *e;
+       u32 id, id2;
+
+       switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+       case BCMA_CC_FLASHT_STSER:
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+               bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+               id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+               bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+               id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+               switch (id) {
+               case 0xbf:
+                       for (e = bcma_sflash_sst_tbl; e->name; e++) {
+                               if (e->id == id2)
+                                       break;
+                       }
+                       break;
+               default:
+                       for (e = bcma_sflash_st_tbl; e->name; e++) {
+                               if (e->id == id)
+                                       break;
+                       }
+                       break;
+               }
+               if (!e->name) {
+                       bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+                       return -ENOTSUPP;
+               }
+
+               break;
+       case BCMA_CC_FLASHT_ATSER:
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+               id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+               for (e = bcma_sflash_at_tbl; e->name; e++) {
+                       if (e->id == id)
+                               break;
+               }
+               if (!e->name) {
+                       bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+                       return -ENOTSUPP;
+               }
+
+               break;
+       default:
+               bcma_err(bus, "Unsupported flash type\n");
+               return -ENOTSUPP;
+       }
+
+       sflash->window = BCMA_SFLASH;
+       sflash->blocksize = e->blocksize;
+       sflash->numblocks = e->numblocks;
+       sflash->size = sflash->blocksize * sflash->numblocks;
+       sflash->present = true;
+
+       bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+                 e->name, sflash->size / 1024, sflash->blocksize,
+                 sflash->numblocks);
+
+       /* Prepare platform device, but don't register it yet. It's too early,
+        * malloc (required by device_private_init) is not available yet. */
+       bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+                                         sflash->size;
+       bcma_sflash_dev.dev.platform_data = sflash;
+
        return 0;
 }
index a6e5672..f7b0af7 100644 (file)
@@ -77,8 +77,8 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
 }
 
 #ifdef CONFIG_BCMA_BLOCKIO
-void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
-                             size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+                                    size_t count, u16 offset, u8 reg_width)
 {
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
@@ -100,8 +100,9 @@ void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
        }
 }
 
-void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
-                              size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_write(struct bcma_device *core,
+                                     const void *buffer, size_t count,
+                                     u16 offset, u8 reg_width)
 {
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
@@ -139,7 +140,7 @@ static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
        iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
 }
 
-const struct bcma_host_ops bcma_host_pci_ops = {
+static const struct bcma_host_ops bcma_host_pci_ops = {
        .read8          = bcma_host_pci_read8,
        .read16         = bcma_host_pci_read16,
        .read32         = bcma_host_pci_read32,
index 3c381fb..3475e60 100644 (file)
@@ -143,7 +143,7 @@ static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
        writel(value, core->io_wrap + offset);
 }
 
-const struct bcma_host_ops bcma_host_soc_ops = {
+static const struct bcma_host_ops bcma_host_soc_ops = {
        .read8          = bcma_host_soc_read8,
        .read16         = bcma_host_soc_read16,
        .read32         = bcma_host_soc_read32,
index 758af9c..a8f570d 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
                dev_id++;
        }
 
+#ifdef CONFIG_BCMA_SFLASH
+       if (bus->drv_cc.sflash.present) {
+               err = platform_device_register(&bcma_sflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering serial flash\n");
+       }
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+       if (bus->drv_cc.nflash.present) {
+               err = platform_device_register(&bcma_nflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering NAND flash\n");
+       }
+#endif
+
        return 0;
 }
 
index 37ae175..364f82b 100644 (file)
@@ -177,7 +177,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data) {
                BT_ERR("Can't allocate memory for data structure");
                return -ENOMEM;
@@ -189,14 +189,12 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
        data->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!data->urb) {
                BT_ERR("Can't allocate URB");
-               kfree(data);
                return -ENOMEM;
        }
 
        if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
                BT_ERR("Mini driver request failed");
                usb_free_urb(data->urb);
-               kfree(data);
                return -EIO;
        }
 
@@ -209,7 +207,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                BT_ERR("Can't allocate memory for mini driver");
                release_firmware(firmware);
                usb_free_urb(data->urb);
-               kfree(data);
                return -ENOMEM;
        }
 
@@ -224,7 +221,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                BT_ERR("Firmware request failed");
                usb_free_urb(data->urb);
                kfree(data->buffer);
-               kfree(data);
                return -EIO;
        }
 
@@ -236,7 +232,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                release_firmware(firmware);
                usb_free_urb(data->urb);
                kfree(data->buffer);
-               kfree(data);
                return -ENOMEM;
        }
 
@@ -271,7 +266,6 @@ static void bcm203x_disconnect(struct usb_interface *intf)
        usb_free_urb(data->urb);
        kfree(data->fw_data);
        kfree(data->buffer);
-       kfree(data);
 }
 
 static struct usb_driver bcm203x_driver = {
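
The Bluetooth driver changes in this merge all follow the same conversion:
per-device private data moves from kzalloc()/kfree() to devm_kzalloc(), so the
error paths and the disconnect/remove handlers no longer free it by hand.
Below is a condensed, generic sketch of the before/after idiom; it is not
taken from any one of the drivers, and all names in it are invented.

        #include <linux/device.h>
        #include <linux/errno.h>
        #include <linux/slab.h>

        struct example_priv {
                int dummy;              /* stand-in for real per-device state */
        };

        static int example_setup(struct example_priv *priv)
        {
                return 0;               /* placeholder for real hardware setup */
        }

        /* Before: kzalloc() means every error path and the disconnect/remove
         * handler must remember to kfree() the private data. */
        static int example_probe_before(struct device *dev)
        {
                struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
                int err;

                if (!priv)
                        return -ENOMEM;
                err = example_setup(priv);
                if (err) {
                        kfree(priv);
                        return err;
                }
                dev_set_drvdata(dev, priv);
                return 0;
        }

        /* After: devm_kzalloc() ties the allocation's lifetime to the device,
         * so the explicit kfree() calls simply go away. */
        static int example_probe_after(struct device *dev)
        {
                struct example_priv *priv = devm_kzalloc(dev, sizeof(*priv),
                                                         GFP_KERNEL);
                int err;

                if (!priv)
                        return -ENOMEM;
                err = example_setup(priv);
                if (err)
                        return err;
                dev_set_drvdata(dev, priv);
                return 0;
        }
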
index 32e8251..995aee9 100644 (file)
@@ -653,7 +653,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        }
 
        /* Initialize control structure and load firmware */
-       data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
        if (!data) {
                BT_ERR("Can't allocate memory for control structure");
                goto done;
@@ -674,7 +674,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
                BT_ERR("Firmware request failed");
-               goto error;
+               goto done;
        }
 
        BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -690,7 +690,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        hdev = hci_alloc_dev();
        if (!hdev) {
                BT_ERR("Can't allocate HCI device");
-               goto error;
+               goto done;
        }
 
        data->hdev = hdev;
@@ -708,7 +708,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        if (hci_register_dev(hdev) < 0) {
                BT_ERR("Can't register HCI device");
                hci_free_dev(hdev);
-               goto error;
+               goto done;
        }
 
        usb_set_intfdata(intf, data);
@@ -718,9 +718,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 release:
        release_firmware(firmware);
 
-error:
-       kfree(data);
-
 done:
        return -EIO;
 }
@@ -741,7 +738,6 @@ static void bfusb_disconnect(struct usb_interface *intf)
 
        hci_unregister_dev(hdev);
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 static struct usb_driver bfusb_driver = {
index 66c3a67..0c0838d 100644 (file)
@@ -849,7 +849,7 @@ static int bluecard_probe(struct pcmcia_device *link)
        bluecard_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -864,10 +864,7 @@ static int bluecard_probe(struct pcmcia_device *link)
 
 static void bluecard_detach(struct pcmcia_device *link)
 {
-       bluecard_info_t *info = link->priv;
-
        bluecard_release(link);
-       kfree(info);
 }
 
 
index 29caaed..2fe4a80 100644 (file)
@@ -443,7 +443,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -453,10 +453,8 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        init_usb_anchor(&data->rx_anchor);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
@@ -475,7 +473,6 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -500,7 +497,6 @@ static void bpa10x_disconnect(struct usb_interface *intf)
        hci_free_dev(data->hdev);
        kfree_skb(data->rx_skb[0]);
        kfree_skb(data->rx_skb[1]);
-       kfree(data);
 }
 
 static struct usb_driver bpa10x_driver = {
index 8925b6d..7ffd3f4 100644 (file)
@@ -638,7 +638,7 @@ static int bt3c_probe(struct pcmcia_device *link)
        bt3c_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -654,10 +654,7 @@ static int bt3c_probe(struct pcmcia_device *link)
 
 static void bt3c_detach(struct pcmcia_device *link)
 {
-       bt3c_info_t *info = link->priv;
-
        bt3c_release(link);
-       kfree(info);
 }
 
 static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
index 6a9e971..03b3acb 100644 (file)
@@ -956,11 +956,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
        BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
                        id->vendor, id->device, id->class, func->num);
 
-       card = kzalloc(sizeof(*card), GFP_KERNEL);
-       if (!card) {
-               ret = -ENOMEM;
-               goto done;
-       }
+       card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
 
        card->func = func;
 
@@ -974,8 +972,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 
        if (btmrvl_sdio_register_dev(card) < 0) {
                BT_ERR("Failed to register BT device!");
-               ret = -ENODEV;
-               goto free_card;
+               return -ENODEV;
        }
 
        /* Disable the interrupts on the card */
@@ -1023,9 +1020,6 @@ disable_host_int:
        btmrvl_sdio_disable_host_int(card);
 unreg_dev:
        btmrvl_sdio_unregister_dev(card);
-free_card:
-       kfree(card);
-done:
        return ret;
 }
 
@@ -1047,7 +1041,6 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
                        BT_DBG("unregester dev");
                        btmrvl_sdio_unregister_dev(card);
                        btmrvl_remove_card(card->priv);
-                       kfree(card);
                }
        }
 }
index e10ea03..4a99097 100644 (file)
@@ -304,7 +304,7 @@ static int btsdio_probe(struct sdio_func *func,
                tuple = tuple->next;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -315,10 +315,8 @@ static int btsdio_probe(struct sdio_func *func,
        skb_queue_head_init(&data->txq);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_SDIO;
        hci_set_drvdata(hdev, data);
@@ -340,7 +338,6 @@ static int btsdio_probe(struct sdio_func *func,
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -366,7 +363,6 @@ static void btsdio_remove(struct sdio_func *func)
        hci_unregister_dev(hdev);
 
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 static struct sdio_driver btsdio_driver = {
index 21e803a..2f510a8 100644 (file)
@@ -567,7 +567,7 @@ static int btuart_probe(struct pcmcia_device *link)
        btuart_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -583,10 +583,7 @@ static int btuart_probe(struct pcmcia_device *link)
 
 static void btuart_detach(struct pcmcia_device *link)
 {
-       btuart_info_t *info = link->priv;
-
        btuart_release(link);
-       kfree(info);
 }
 
 static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
index cef3bac..fa2a7d5 100644 (file)
@@ -954,7 +954,7 @@ static int btusb_probe(struct usb_interface *intf,
                        return -ENODEV;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -977,10 +977,8 @@ static int btusb_probe(struct usb_interface *intf,
                }
        }
 
-       if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
-               kfree(data);
+       if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
                return -ENODEV;
-       }
 
        data->cmdreq_type = USB_TYPE_CLASS;
 
@@ -1000,10 +998,8 @@ static int btusb_probe(struct usb_interface *intf,
        init_usb_anchor(&data->deferred);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
@@ -1071,7 +1067,6 @@ static int btusb_probe(struct usb_interface *intf,
                                                        data->isoc, data);
                if (err < 0) {
                        hci_free_dev(hdev);
-                       kfree(data);
                        return err;
                }
        }
@@ -1079,7 +1074,6 @@ static int btusb_probe(struct usb_interface *intf,
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -1112,7 +1106,6 @@ static void btusb_disconnect(struct usb_interface *intf)
                usb_driver_release_interface(&btusb_driver, data->isoc);
 
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 #ifdef CONFIG_PM
index 8869469..4ad7b35 100644 (file)
@@ -297,16 +297,14 @@ static int bt_ti_probe(struct platform_device *pdev)
        struct hci_dev *hdev;
        int err;
 
-       hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL);
+       hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
        if (!hst)
                return -ENOMEM;
 
        /* Expose "hciX" device to user space */
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(hst);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        BT_DBG("hdev %p", hdev);
 
@@ -321,7 +319,6 @@ static int bt_ti_probe(struct platform_device *pdev)
        err = hci_register_dev(hdev);
        if (err < 0) {
                BT_ERR("Can't register HCI device error %d", err);
-               kfree(hst);
                hci_free_dev(hdev);
                return err;
        }
@@ -347,7 +344,6 @@ static int bt_ti_remove(struct platform_device *pdev)
        hci_unregister_dev(hdev);
 
        hci_free_dev(hdev);
-       kfree(hst);
 
        dev_set_drvdata(&pdev->dev, NULL);
        return 0;
index 97a7784..036cb36 100644 (file)
@@ -550,7 +550,7 @@ static int dtl1_probe(struct pcmcia_device *link)
        dtl1_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -569,7 +569,6 @@ static void dtl1_detach(struct pcmcia_device *link)
 
        dtl1_close(info);
        pcmcia_disable_device(link);
-       kfree(info);
 }
 
 static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
index 0c2bd80..707ab7b 100644 (file)
@@ -107,8 +107,6 @@ config MII
          or internal device.  It is safe to say Y or M here even if your
          ethernet card lacks MII.
 
-source "drivers/ieee802154/Kconfig"
-
 config IFB
        tristate "Intermediate Functional Block support"
        depends on NET_CLS_ACT
@@ -290,6 +288,8 @@ source "drivers/net/wimax/Kconfig"
 
 source "drivers/net/wan/Kconfig"
 
+source "drivers/net/ieee802154/Kconfig"
+
 config XEN_NETDEV_FRONTEND
        tristate "Xen network device frontend driver"
        depends on XEN
index 3d375ca..b682a1d 100644 (file)
@@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_IEEE802154) += ieee802154/
 
 obj-$(CONFIG_VMXNET3) += vmxnet3/
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
index d688a8a..b24ce25 100644 (file)
@@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        write_unlock_bh(&bond->curr_slave_lock);
                        read_unlock(&bond->lock);
 
-                       netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
+                       call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
                        if (should_notify_peers)
-                               netdev_bonding_change(bond->dev,
-                                                     NETDEV_NOTIFY_PEERS);
+                               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+                                                        bond->dev);
 
                        read_lock(&bond->lock);
                        write_lock_bh(&bond->curr_slave_lock);
@@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                 bond_dev->name,
                                 bond_dev->type, slave_dev->type);
 
-                       res = netdev_bonding_change(bond_dev,
-                                                   NETDEV_PRE_TYPE_CHANGE);
+                       res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
+                                                      bond_dev);
                        res = notifier_to_errno(res);
                        if (res) {
                                pr_err("%s: refused to change device type\n",
@@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                        }
 
-                       netdev_bonding_change(bond_dev,
-                                             NETDEV_POST_TYPE_CHANGE);
+                       call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+                                                bond_dev);
                }
        } else if (bond_dev->type != slave_dev->type) {
                pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        block_netpoll_tx();
-       netdev_bonding_change(bond_dev, NETDEV_RELEASE);
+       call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
        write_lock_bh(&bond->lock);
 
        slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2584,7 +2584,7 @@ re_arm:
                        read_unlock(&bond->lock);
                        return;
                }
-               netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
                rtnl_unlock();
        }
 }
@@ -3203,7 +3203,7 @@ re_arm:
                        read_unlock(&bond->lock);
                        return;
                }
-               netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
                rtnl_unlock();
        }
 }
@@ -3352,56 +3352,93 @@ static struct notifier_block bond_netdev_notifier = {
 /*---------------------------- Hashing Policies -----------------------------*/
 
 /*
+ * Hash for the output device based upon layer 2 data
+ */
+static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+{
+       struct ethhdr *data = (struct ethhdr *)skb->data;
+
+       if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
+               return (data->h_dest[5] ^ data->h_source[5]) % count;
+
+       return 0;
+}
+
+/*
  * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP mimic bond_xmit_hash_policy_l2()
+ * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = ip_hdr(skb);
-
-       if (skb->protocol == htons(ETH_P_IP)) {
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       u32 v6hash;
+       __be32 *s, *d;
+
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb_network_header_len(skb) >= sizeof(*iph)) {
+               iph = ip_hdr(skb);
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
+       } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+               ipv6h = ipv6_hdr(skb);
+               s = &ipv6h->saddr.s6_addr32[0];
+               d = &ipv6h->daddr.s6_addr32[0];
+               v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+               v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
+               return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
        }
 
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
+       return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*
  * Hash for the output device based upon layer 3 and layer 4 data. If
  * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, mimic bond_xmit_hash_policy_l2()
+ * altogether not IP, fall back on bond_xmit_hash_policy_l2()
  */
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = ip_hdr(skb);
-       __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-       int layer4_xor = 0;
-
-       if (skb->protocol == htons(ETH_P_IP)) {
+       u32 layer4_xor = 0;
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       __be32 *s, *d;
+       __be16 *layer4hdr;
+
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb_network_header_len(skb) >= sizeof(*iph)) {
+               iph = ip_hdr(skb);
                if (!ip_is_fragment(iph) &&
                    (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP)) {
-                       layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
+                    iph->protocol == IPPROTO_UDP) &&
+                   (skb_headlen(skb) - skb_network_offset(skb) >=
+                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
+                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
+                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-
+       } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+               ipv6h = ipv6_hdr(skb);
+               if ((ipv6h->nexthdr == IPPROTO_TCP ||
+                    ipv6h->nexthdr == IPPROTO_UDP) &&
+                   (skb_headlen(skb) - skb_network_offset(skb) >=
+                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
+                       layer4hdr = (__be16 *)(ipv6h + 1);
+                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               }
+               s = &ipv6h->saddr.s6_addr32[0];
+               d = &ipv6h->daddr.s6_addr32[0];
+               layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+               layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
+                              (layer4_xor >> 8);
+               return layer4_xor % count;
        }
 
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
-}
-
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
-{
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
+       return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*-------------------------- Device entry points ----------------------------*/
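
For illustration only, a standalone user-space sketch of the layer 3+4 arithmetic used above for IPv6: XOR words 1-3 of the source and destination addresses together with the port XOR, fold the upper bytes into the low byte, then take the result modulo the slave count (the addresses and port value below are made up):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the fold used by bond_xmit_hash_policy_l34() for IPv6. */
static unsigned int fold_v6(const uint32_t *s, const uint32_t *d,
			    uint32_t l4, int count)
{
	uint32_t h = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);

	h ^= l4;				/* src^dst port XOR, if any */
	h ^= (h >> 24) ^ (h >> 16) ^ (h >> 8);	/* fold upper bytes down */
	return h % count;
}

int main(void)
{
	/* made-up addresses, host byte order, plus a port XOR of 0x1234 */
	uint32_t saddr[4] = { 0x20010db8, 0x00000001, 0x00000000, 0x00000001 };
	uint32_t daddr[4] = { 0x20010db8, 0x00000002, 0x00000000, 0x00000099 };

	printf("selected slave index = %u of 4\n",
	       fold_v6(saddr, daddr, 0x1234, 4));
	return 0;
}
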
index d266c86..5b62299 100644 (file)
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MAX_RX_POST            BE_NAPI_WEIGHT /* Frags posted at a time */
 #define RX_FRAGS_REFILL_WM     (RX_Q_LEN - MAX_RX_POST)
 
+#define MAX_VFS                        30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN             32
 
 struct be_dma_mem {
index 8c63d06..701b3e9 100644 (file)
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
-                                "opcode %d-%d is not permitted\n",
+                                "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
index 78b8aa8..111dc88 100644 (file)
@@ -2176,8 +2176,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 {
        u32 num = 0;
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-            !sriov_want(adapter) && be_physfn(adapter) &&
-            !be_is_mc(adapter)) {
+            !sriov_want(adapter) && be_physfn(adapter)) {
                num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
                num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
        }
@@ -2646,8 +2645,8 @@ static int be_vf_setup(struct be_adapter *adapter)
        }
 
        for_all_vfs(adapter, vf_cfg, vf) {
-               status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-                                                 NULL, vf + 1);
+               lnk_speed = 1000;
+               status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
                if (status)
                        goto err;
                vf_cfg->tx_rate = lnk_speed * 10;
@@ -2724,6 +2723,8 @@ static int be_get_config(struct be_adapter *adapter)
        if (pos) {
                pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
                                     &dev_num_vfs);
+               if (!lancer_chip(adapter))
+                       dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
                adapter->dev_num_vfs = dev_num_vfs;
        }
        return 0;
@@ -3437,6 +3438,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
+       kfree(adapter->pmac_id);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3475,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
        }
        memset(rx_filter->va, 0, rx_filter->size);
 
+       /* primary mac needs 1 pmac entry */
+       adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+                                  sizeof(*adapter->pmac_id), GFP_KERNEL);
+       if (!adapter->pmac_id)
+               return -ENOMEM;
+
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
@@ -3609,12 +3617,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
        else
                adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
 
-       /* primary mac needs 1 pmac entry */
-       adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-                                 sizeof(u32), GFP_KERNEL);
-       if (!adapter->pmac_id)
-               return -ENOMEM;
-
        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;
index 3574e14..feff516 100644 (file)
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
        ---help---
          This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
+config FSL_XGMAC_MDIO
+       tristate "Freescale XGMAC MDIO"
+       depends on FSL_SOC
+       select PHYLIB
+       ---help---
+         This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
 config UCC_GETH
        tristate "Freescale QE Gigabit Ethernet"
        depends on QUICC_ENGINE
index 1752488..3d1839a 100644 (file)
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 endif
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
index 9527b28..c93a056 100644 (file)
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/unistd.h>
 #include <linux/slab.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/crc32.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/ucc.h>
+#include <asm/ucc.h>   /* for ucc_set_qe_mux_mii_mng() */
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
+
+#define MIIMIND_BUSY           0x00000001
+#define MIIMIND_NOTVALID       0x00000004
+#define MIIMCFG_INIT_VALUE     0x00000007
+#define MIIMCFG_RESET          0x80000000
+
+#define MII_READ_COMMAND       0x00000001
+
+struct fsl_pq_mii {
+       u32 miimcfg;    /* MII management configuration reg */
+       u32 miimcom;    /* MII management command reg */
+       u32 miimadd;    /* MII management address reg */
+       u32 miimcon;    /* MII management control reg */
+       u32 miimstat;   /* MII management status reg */
+       u32 miimind;    /* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+       u8 res1[16];
+       u32 ieventm;    /* MDIO Interrupt event register (for etsec2)*/
+       u32 imaskm;     /* MDIO Interrupt mask register (for etsec2)*/
+       u8 res2[4];
+       u32 emapm;      /* MDIO Event mapping register (for etsec2)*/
+       u8 res3[1280];
+       struct fsl_pq_mii mii;
+       u8 res4[28];
+       u32 utbipar;    /* TBI phy address reg (only on UCC) */
+       u8 res5[2728];
+} __packed;
 
 /* Number of microseconds to wait for an MII register to respond */
 #define MII_TIMEOUT    1000
 
 struct fsl_pq_mdio_priv {
        void __iomem *map;
-       struct fsl_pq_mdio __iomem *regs;
+       struct fsl_pq_mii __iomem *regs;
+       int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data.  Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node.  Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+       unsigned int mii_offset;        /* offset of the MII registers */
+       uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+       void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
 };
 
 /*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus attached to the local interface, which may be different from the
- * generic mdio bus (tied to a single interface), waiting until the write is
- * done before returning. This is helpful in programming interfaces like
- * the TBI which control interfaces like onchip SERDES and are always tied to
- * the local mdio pins, which may not be the same as system mdio bus, used for
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
  * controlling the external PHYs, for example.
  */
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-               int regnum, u16 value)
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+               u16 value)
 {
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
 
        /* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
 }
 
 /*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.  All PHY operation
- * done on the bus attached to the local interface,
- * which may be different from the generic mdio bus
- * This is helpful in programming interfaces like
- * the TBI which, in turn, control interfaces like onchip SERDES
- * and are always tied to the local mdio pins, which may not be the
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operation done on the bus attached to the local interface, which
+ * may be different from the generic mdio bus.  This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
 * same as system mdio bus, used for controlling the external PHYs, for example.
  */
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
-               int mii_id, int regnum)
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       u16 value;
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
+       u16 value;
 
        /* Set the PHY address and the register address we want to read */
        out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
        /* Grab the value of the register from miimstat */
        value = in_be32(&regs->miimstat);
 
+       dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
        return value;
 }
 
-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
-       struct fsl_pq_mdio_priv *priv = bus->priv;
-
-       return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-       /* Write to the local MII regs */
-       return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-       /* Read the local MII regs */
-       return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
 
        mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
        mutex_unlock(&bus->mdio_lock);
 
        if (!status) {
-               printk(KERN_ERR "%s: The MII Bus is stuck!\n",
-                               bus->name);
+               dev_err(&bus->dev, "timeout waiting for MII bus\n");
                return -EBUSY;
        }
 
        return 0;
 }
 
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
 {
-       const u32 *addr;
-       u64 taddr = OF_BAD_ADDR;
-
-       addr = of_get_address(np, 0, NULL, NULL);
-       if (addr)
-               taddr = of_translate_address(np, addr);
+       struct gfar __iomem *enet_regs = p;
 
-       snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
-               (unsigned long long)taddr);
+       return &enet_regs->tbipa;
 }
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+       return p;
+}
+#endif
 
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-       struct gfar __iomem *enet_regs;
+       struct fsl_pq_mdio __iomem *mdio = p;
 
-       /*
-        * This is mildly evil, but so is our hardware for doing this.
-        * Also, we have to cast back to struct gfar because of
-        * definition weirdness done in gianfar.h.
-        */
-       if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-               of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-               of_device_is_compatible(np, "gianfar")) {
-               enet_regs = (struct gfar __iomem *)regs;
-               return &enet_regs->tbipa;
-       } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-                       of_device_is_compatible(np, "fsl,etsec2-tbi")) {
-               return of_iomap(np, 1);
-       }
-#endif
-       return NULL;
+       return &mdio->utbipar;
 }
 
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them.  Therefore, we need to scan all UCC nodes looking for
+ * the one that encompases the given MDIO node.  We do this by comparing
+ * physical addresses.  The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
 {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+       static bool found_mii_master;
        struct device_node *np = NULL;
-       int err = 0;
 
-       for_each_compatible_node(np, NULL, "ucc_geth") {
-               struct resource tempres;
+       if (found_mii_master)
+               return;
 
-               err = of_address_to_resource(np, 0, &tempres);
-               if (err)
+       for_each_compatible_node(np, NULL, "ucc_geth") {
+               struct resource res;
+               const uint32_t *iprop;
+               uint32_t id;
+               int ret;
+
+               ret = of_address_to_resource(np, 0, &res);
+               if (ret < 0) {
+                       pr_debug("fsl-pq-mdio: no address range in node %s\n",
+                                np->full_name);
                        continue;
+               }
 
                /* if our mdio regs fall within this UCC regs range */
-               if ((start >= tempres.start) && (end <= tempres.end)) {
-                       /* Find the id of the UCC */
-                       const u32 *id;
-
-                       id = of_get_property(np, "cell-index", NULL);
-                       if (!id) {
-                               id = of_get_property(np, "device-id", NULL);
-                               if (!id)
-                                       continue;
+               if ((start < res.start) || (end > res.end))
+                       continue;
+
+               iprop = of_get_property(np, "cell-index", NULL);
+               if (!iprop) {
+                       iprop = of_get_property(np, "device-id", NULL);
+                       if (!iprop) {
+                               pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+                                        np->full_name);
+                               continue;
                        }
+               }
 
-                       *ucc_id = *id;
+               id = be32_to_cpup(iprop);
 
-                       return 0;
+               /*
+                * cell-index and device-id for QE nodes are
+                * numbered from 1, not 0.
+                */
+               if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+                       pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+                                np->full_name);
+                       continue;
                }
+
+               pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+               found_mii_master = true;
        }
+}
 
-       if (err)
-               return err;
-       else
-               return -EINVAL;
-#else
-       return -ENODEV;
 #endif
-}
 
-static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+static struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+       {
+               .compatible = "fsl,gianfar-tbi",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,gianfar-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .type = "mdio",
+               .compatible = "gianfar",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,etsec2-tbi",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_etsec_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,etsec2-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_etsec_tbipa,
+               },
+       },
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+       {
+               .compatible = "fsl,ucc-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_ucc_tbipa,
+                       .ucc_configure = ucc_configure,
+               },
+       },
+       {
+               /* Legacy UCC MDIO node */
+               .type = "mdio",
+               .compatible = "ucc_geth_phy",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_ucc_tbipa,
+                       .ucc_configure = ucc_configure,
+               },
+       },
+#endif
+       /* No Kconfig option for Fman support yet */
+       {
+               .compatible = "fsl,fman-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       /* Fman TBI operations are handled elsewhere */
+               },
+       },
+
+       {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
-       struct device_node *np = ofdev->dev.of_node;
+       const struct of_device_id *id =
+               of_match_device(fsl_pq_mdio_match, &pdev->dev);
+       const struct fsl_pq_mdio_data *data = id->data;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource res;
        struct device_node *tbi;
        struct fsl_pq_mdio_priv *priv;
-       struct fsl_pq_mdio __iomem *regs = NULL;
-       void __iomem *map;
-       u32 __iomem *tbipa;
        struct mii_bus *new_bus;
-       int tbiaddr = -1;
-       const u32 *addrp;
-       u64 addr = 0, size = 0;
        int err;
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
-       new_bus = mdiobus_alloc();
-       if (!new_bus) {
-               err = -ENOMEM;
-               goto err_free_priv;
-       }
+       new_bus = mdiobus_alloc_size(sizeof(*priv));
+       if (!new_bus)
+               return -ENOMEM;
 
+       priv = new_bus->priv;
        new_bus->name = "Freescale PowerQUICC MII Bus",
-       new_bus->read = &fsl_pq_mdio_read,
-       new_bus->write = &fsl_pq_mdio_write,
-       new_bus->reset = &fsl_pq_mdio_reset,
-       new_bus->priv = priv;
-       fsl_pq_mdio_bus_name(new_bus->id, np);
-
-       addrp = of_get_address(np, 0, &size, NULL);
-       if (!addrp) {
-               err = -EINVAL;
-               goto err_free_bus;
+       new_bus->read = &fsl_pq_mdio_read;
+       new_bus->write = &fsl_pq_mdio_write;
+       new_bus->reset = &fsl_pq_mdio_reset;
+       new_bus->irq = priv->irqs;
+
+       err = of_address_to_resource(np, 0, &res);
+       if (err < 0) {
+               dev_err(&pdev->dev, "could not obtain address information\n");
+               goto error;
        }
 
-       /* Set the PHY base address */
-       addr = of_translate_address(np, addrp);
-       if (addr == OF_BAD_ADDR) {
-               err = -EINVAL;
-               goto err_free_bus;
-       }
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+               (unsigned long long)res.start);
 
-       map = ioremap(addr, size);
-       if (!map) {
+       priv->map = of_iomap(np, 0);
+       if (!priv->map) {
                err = -ENOMEM;
-               goto err_free_bus;
+               goto error;
        }
-       priv->map = map;
-
-       if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-                       of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-                       of_device_is_compatible(np, "fsl,ucc-mdio") ||
-                       of_device_is_compatible(np, "ucc_geth_phy"))
-               map -= offsetof(struct fsl_pq_mdio, miimcfg);
-       regs = map;
-       priv->regs = regs;
-
-       new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 
-       if (NULL == new_bus->irq) {
-               err = -ENOMEM;
-               goto err_unmap_regs;
+       /*
+        * Some device tree nodes represent only the MII registers, and
+        * others represent the MAC and MII registers.  The 'mii_offset' field
+        * contains the offset of the MII registers inside the mapped register
+        * space.
+        */
+       if (data->mii_offset > resource_size(&res)) {
+               dev_err(&pdev->dev, "invalid register map\n");
+               err = -EINVAL;
+               goto error;
        }
+       priv->regs = priv->map + data->mii_offset;
 
-       new_bus->parent = &ofdev->dev;
-       dev_set_drvdata(&ofdev->dev, new_bus);
-
-       if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-                       of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-                       of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-                       of_device_is_compatible(np, "fsl,etsec2-tbi") ||
-                       of_device_is_compatible(np, "gianfar")) {
-               tbipa = get_gfar_tbipa(regs, np);
-               if (!tbipa) {
-                       err = -EINVAL;
-                       goto err_free_irqs;
-               }
-       } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
-                       of_device_is_compatible(np, "ucc_geth_phy")) {
-               u32 id;
-               static u32 mii_mng_master;
-
-               tbipa = &regs->utbipar;
-
-               if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
-                       goto err_free_irqs;
+       new_bus->parent = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, new_bus);
 
-               if (!mii_mng_master) {
-                       mii_mng_master = id;
-                       ucc_set_qe_mux_mii_mng(id - 1);
+       if (data->get_tbipa) {
+               for_each_child_of_node(np, tbi) {
+                       if (strcmp(tbi->type, "tbi-phy") == 0) {
+                               dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+                                       strrchr(tbi->full_name, '/') + 1);
+                               break;
+                       }
                }
-       } else {
-               err = -ENODEV;
-               goto err_free_irqs;
-       }
 
-       for_each_child_of_node(np, tbi) {
-               if (!strncmp(tbi->type, "tbi-phy", 8))
-                       break;
-       }
+               if (tbi) {
+                       const u32 *prop = of_get_property(tbi, "reg", NULL);
+                       uint32_t __iomem *tbipa;
 
-       if (tbi) {
-               const u32 *prop = of_get_property(tbi, "reg", NULL);
+                       if (!prop) {
+                               dev_err(&pdev->dev,
+                                       "missing 'reg' property in node %s\n",
+                                       tbi->full_name);
+                               err = -EBUSY;
+                               goto error;
+                       }
 
-               if (prop)
-                       tbiaddr = *prop;
+                       tbipa = data->get_tbipa(priv->map);
 
-               if (tbiaddr == -1) {
-                       err = -EBUSY;
-                       goto err_free_irqs;
-               } else {
-                       out_be32(tbipa, tbiaddr);
+                       out_be32(tbipa, be32_to_cpup(prop));
                }
        }
 
+       if (data->ucc_configure)
+               data->ucc_configure(res.start, res.end);
+
        err = of_mdiobus_register(new_bus, np);
        if (err) {
-               printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
-                               new_bus->name);
-               goto err_free_irqs;
+               dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+                       new_bus->name);
+               goto error;
        }
 
        return 0;
 
-err_free_irqs:
-       kfree(new_bus->irq);
-err_unmap_regs:
-       iounmap(priv->map);
-err_free_bus:
+error:
+       if (priv->map)
+               iounmap(priv->map);
+
        kfree(new_bus);
-err_free_priv:
-       kfree(priv);
+
        return err;
 }
 
 
-static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
 {
-       struct device *device = &ofdev->dev;
+       struct device *device = &pdev->dev;
        struct mii_bus *bus = dev_get_drvdata(device);
        struct fsl_pq_mdio_priv *priv = bus->priv;
 
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
        dev_set_drvdata(device, NULL);
 
        iounmap(priv->map);
-       bus->priv = NULL;
        mdiobus_free(bus);
-       kfree(priv);
 
        return 0;
 }
 
-static struct of_device_id fsl_pq_mdio_match[] = {
-       {
-               .type = "mdio",
-               .compatible = "ucc_geth_phy",
-       },
-       {
-               .type = "mdio",
-               .compatible = "gianfar",
-       },
-       {
-               .compatible = "fsl,ucc-mdio",
-       },
-       {
-               .compatible = "fsl,gianfar-tbi",
-       },
-       {
-               .compatible = "fsl,gianfar-mdio",
-       },
-       {
-               .compatible = "fsl,etsec2-tbi",
-       },
-       {
-               .compatible = "fsl,etsec2-mdio",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-
 static struct platform_driver fsl_pq_mdio_driver = {
        .driver = {
                .name = "fsl-pq_mdio",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644 (file)
index bd17a2a..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller on Freescale PowerQUICC processors
- *
- * Author: Andy Fleming
- * Modifier: Sandeep Gopalpet
- *
- * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-#ifndef __FSL_PQ_MDIO_H
-#define __FSL_PQ_MDIO_H
-
-#define MIIMIND_BUSY            0x00000001
-#define MIIMIND_NOTVALID        0x00000004
-#define MIIMCFG_INIT_VALUE     0x00000007
-#define MIIMCFG_RESET           0x80000000
-
-#define MII_READ_COMMAND       0x00000001
-
-struct fsl_pq_mdio {
-       u8 res1[16];
-       u32 ieventm;    /* MDIO Interrupt event register (for etsec2)*/
-       u32 imaskm;     /* MDIO Interrupt mask register (for etsec2)*/
-       u8 res2[4];
-       u32 emapm;      /* MDIO Event mapping register (for etsec2)*/
-       u8 res3[1280];
-       u32 miimcfg;            /* MII management configuration reg */
-       u32 miimcom;            /* MII management command reg */
-       u32 miimadd;            /* MII management address reg */
-       u32 miimcon;            /* MII management control reg */
-       u32 miimstat;           /* MII management status reg */
-       u32 miimind;            /* MII management indication reg */
-       u8 reserved[28];        /* Space holder */
-       u32 utbipar;            /* TBI phy address reg (only on UCC) */
-       u8 res4[2728];
-} __packed;
-
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-                         int regnum, u16 value);
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
-int __init fsl_pq_mdio_init(void);
-void fsl_pq_mdio_exit(void);
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
-#endif /* FSL_PQ_MDIO_H */
index d3233f5..4d5b58c 100644 (file)
 #include <linux/of_net.h>
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
 
 #define TX_TIMEOUT      (1*HZ)
 
index 21c6574..1642884 100644 (file)
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
-#include "fsl_pq_mdio.h"
 
 #undef DEBUG
 
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644 (file)
index 0000000..1afb5ea
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ *          Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT        1000
+
+struct tgec_mdio_controller {
+       __be32  reserved[12];
+       __be32  mdio_stat;      /* MDIO configuration and status */
+       __be32  mdio_ctl;       /* MDIO control */
+       __be32  mdio_data;      /* MDIO data */
+       __be32  mdio_addr;      /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x)    (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY          (1 << 0)
+#define MDIO_STAT_RD_ER                (1 << 1)
+#define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS       (1 << 10)
+#define MDIO_CTL_SCAN_EN       (1 << 11)
+#define MDIO_CTL_POST_INC      (1 << 14)
+#define MDIO_CTL_READ          (1 << 15)
+
+#define MDIO_DATA(x)           (x & 0xffff)
+#define MDIO_DATA_BSY          (1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+                                struct tgec_mdio_controller __iomem *regs)
+{
+       uint32_t status;
+
+       /* Wait till the bus is free */
+       status = spin_event_timeout(
+               !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+       if (!status) {
+               dev_err(dev, "timeout waiting for bus to be free\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+                                struct tgec_mdio_controller __iomem *regs)
+{
+       uint32_t status;
+
+       /* Wait till the MDIO write is complete */
+       status = spin_event_timeout(
+               !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+       if (!status) {
+               dev_err(dev, "timeout waiting for operation to complete\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns.  All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       uint16_t dev_addr = regnum >> 16;
+       int ret;
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Set the port and dev addr */
+       out_be32(&regs->mdio_ctl,
+                MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+       /* Set the register address */
+       out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Write the value to the register */
+       out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+       ret = xgmac_wait_until_done(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first.  All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       uint16_t dev_addr = regnum >> 16;
+       uint32_t mdio_ctl;
+       uint16_t value;
+       int ret;
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Set the Port and Device Addrs */
+       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+       out_be32(&regs->mdio_ctl, mdio_ctl);
+
+       /* Set the register address */
+       out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Initiate the read */
+       out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+       ret = xgmac_wait_until_done(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Return all Fs if nothing was there */
+       if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+               dev_err(&bus->dev, "MDIO read error\n");
+               return 0xffff;
+       }
+
+       value = in_be32(&regs->mdio_data) & 0xffff;
+       dev_dbg(&bus->dev, "read %04x\n", value);
+
+       return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       int ret;
+
+       mutex_lock(&bus->mdio_lock);
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct mii_bus *bus;
+       struct resource res;
+       int ret;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&pdev->dev, "could not obtain address\n");
+               return ret;
+       }
+
+       bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+       if (!bus)
+               return -ENOMEM;
+
+       bus->name = "Freescale XGMAC MDIO Bus";
+       bus->read = xgmac_mdio_read;
+       bus->write = xgmac_mdio_write;
+       bus->reset = xgmac_mdio_reset;
+       bus->irq = bus->priv;
+       bus->parent = &pdev->dev;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+       /* Set the PHY base address */
+       bus->priv = of_iomap(np, 0);
+       if (!bus->priv) {
+               ret = -ENOMEM;
+               goto err_ioremap;
+       }
+
+       ret = of_mdiobus_register(bus, np);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot register MDIO bus\n");
+               goto err_registration;
+       }
+
+       dev_set_drvdata(&pdev->dev, bus);
+
+       return 0;
+
+err_registration:
+       iounmap(bus->priv);
+
+err_ioremap:
+       mdiobus_free(bus);
+
+       return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+       mdiobus_unregister(bus);
+       iounmap(bus->priv);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+       {
+               .compatible = "fsl,fman-xmdio",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+       .driver = {
+               .name = "fsl-fman_xmdio",
+               .of_match_table = xgmac_mdio_match,
+       },
+       .probe = xgmac_mdio_probe,
+       .remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
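
As a worked example (values chosen arbitrarily), the field macros above combine into single mdio_stat/mdio_ctl register values; this standalone sketch simply re-expands a few of them and is not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Same field encodings as the macros above (illustrative copy). */
#define MDIO_STAT_CLKDIV(x)	(((x >> 1) & 0xff) << 8)
#define MDIO_CTL_DEV_ADDR(x)	(x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x)	((x & 0x1f) << 5)
#define MDIO_CTL_READ		(1 << 15)

int main(void)
{
	/* regnum as split by xgmac_mdio_read(): device address in the upper
	 * 16 bits, register number in the lower 16 (hypothetical values). */
	uint32_t regnum = (1 << 16) | 0x0007;
	uint16_t devad  = regnum >> 16;
	uint32_t ctl    = MDIO_CTL_PORT_ADDR(3) | MDIO_CTL_DEV_ADDR(devad);

	printf("mdio_stat clkdiv(100) = 0x%04x\n",
	       (unsigned)MDIO_STAT_CLKDIV(100));	/* 0x3200 */
	printf("mdio_ctl              = 0x%04x\n", (unsigned)ctl);	/* 0x0061 */
	printf("mdio_ctl | READ       = 0x%04x\n", (unsigned)(ctl | MDIO_CTL_READ));
	return 0;
}
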
index 736a7d9..9089d00 100644 (file)
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
 
        ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
                         hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       /* MDI-X => 1; MDI => 0 */
+       if ((hw->media_type == e1000_media_type_copper) &&
+           netif_carrier_ok(netdev))
+               ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+                                                       ETH_TP_MDI_X :
+                                                       ETH_TP_MDI);
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+       if (hw->mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->mdix;
        return 0;
 }
 
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
 
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
                ecmd->advertising = hw->autoneg_advertised;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->flags);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->mdix = AUTO_ALL_MODES;
+               else
+                       hw->mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
 
        if (netif_running(adapter->netdev)) {
index 3bfbb8d..0ae2fcf 100644 (file)
@@ -4939,6 +4939,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       hw->mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
index 0349e24..2e76f06 100644 (file)
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
        else
                ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
        return 0;
 }
 
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
                return -EINVAL;
        }
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /*
+                * fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
 
        if (netif_running(adapter->netdev)) {
                e1000e_down(adapter);
                e1000e_up(adapter);
-       } else {
+       } else
                e1000e_reset(adapter);
-       }
 
        clear_bit(__E1000_RESETTING, &adapter->state);
        return 0;
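
For completeness, a hedged user-space sketch of driving the new eth_tp_mdix / eth_tp_mdix_ctrl fields through the standard SIOCETHTOOL ioctl; the interface name is hypothetical and the headers must already carry the MDI-X control additions:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical NIC */
	ifr.ifr_data = (char *)&ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	printf("current mdix status %u, control %u\n",
	       ecmd.eth_tp_mdix, ecmd.eth_tp_mdix_ctrl);

	/* Request automatic MDI/MDI-X; only honoured with autoneg enabled. */
	ecmd.cmd = ETHTOOL_SSET;
	ecmd.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SSET");

	close(fd);
	return 0;
}
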
index b860d4f..fc62a3f 100644 (file)
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
 #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
 
 /* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82577 PHY Diagnostics Status */
 #define I82577_DSTATUS_CABLE_LENGTH       0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
+       /* Set MDI/MDIX mode */
+       ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               return ret_val;
+       phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+       if (ret_val)
+               return ret_val;
+
        return e1000_set_master_slave_mode(hw);
 }
 
index 7be98b6..3404bc7 100644 (file)
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
        phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
 
        ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Set MDI/MDIX mode */
+       ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               goto out;
+       phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
 
 out:
        return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
        if (ret_val)
                goto out;
 
-       phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
-       phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+       phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
 
        ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
        if (ret_val)
index 34e4061..6ac3299 100644 (file)
@@ -111,8 +111,9 @@ s32  igb_check_polarity_m88(struct e1000_hw *hw);
 #define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
 
 /* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82580_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82580_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82580 PHY Diagnostics Status */
 #define I82580_DSTATUS_CABLE_LENGTH       0x03FC
index 7059111..be02168 100644 (file)
@@ -198,6 +198,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        }
 
        ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       /* MDI-X => 2; MDI => 1; Invalid => 0 */
+       if (hw->phy.media_type == e1000_media_type_copper)
+               ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+                                                     ETH_TP_MDI;
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
        return 0;
 }
 
@@ -214,6 +227,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                return -EINVAL;
        }
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
 
@@ -227,12 +256,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__IGB_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /*
+                * fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
        if (netif_running(adapter->netdev)) {
                igb_down(adapter);
index 48cc4fb..73cc273 100644 (file)
@@ -6675,6 +6675,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
index b9623e9..bffcf1f 100644 (file)
@@ -78,6 +78,9 @@
 
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
+#define IXGBE_RXBUFFER_2K    2048
+#define IXGBE_RXBUFFER_3K    3072
+#define IXGBE_RXBUFFER_4K    4096
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
 /*
 #define IXGBE_TX_FLAGS_FSO             (u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW            (u32)(1 << 7)
 #define IXGBE_TX_FLAGS_TSTAMP          (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_NO_IFCS         (u32)(1 << 9)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
  * this is twice the size of a half page we need to double the page order
  * for FCoE enabled Rx queues.
  */
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+                                           IXGBE_RXBUFFER_3K;
+#endif
+       return IXGBE_RXBUFFER_2K;
 }
-#else
-#define ixgbe_rx_pg_order(_ring) 0
+
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? 1 : 0;
 #endif
+       return 0;
+}
 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
 
 struct ixgbe_ring_container {
        struct ixgbe_ring *ring;        /* pointer to linked list of rings */
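
Since the FCoE special case is now folded into ixgbe_rx_bufsz()/ixgbe_rx_pg_order(), the buffer size and page order depend only on PAGE_SIZE and the ring's FCoE flag; the following standalone sketch (illustrative, not driver code) tabulates the outcomes for 4 KiB and 64 KiB pages:

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the sizing rules above; the real code uses PAGE_SIZE directly. */
static unsigned int rx_bufsz(unsigned int page_size, bool fcoe)
{
	if (fcoe)
		return (page_size < 8192) ? 4096 : 3072;
	return 2048;
}

static unsigned int rx_pg_order(unsigned int page_size, bool fcoe)
{
	return (fcoe && page_size < 8192) ? 1 : 0;
}

int main(void)
{
	unsigned int pages[2] = { 4096, 65536 };

	for (int i = 0; i < 2; i++)
		for (int f = 0; f <= 1; f++)
			printf("PAGE_SIZE=%u fcoe=%d -> bufsz=%u, pg_size=%u\n",
			       pages[i], f, rx_bufsz(pages[i], f),
			       pages[i] << rx_pg_order(pages[i], f));
	return 0;
}
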
index 4326f74..fa0d6e1 100644 (file)
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        }
 
        bi->dma = dma;
-       bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+       bi->page_offset = 0;
 
        return true;
 }
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                return max_len;
 }
 
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
-                             union ixgbe_adv_rx_desc *rx_desc,
-                             struct sk_buff *skb)
-{
-       __le32 rsc_enabled;
-       u32 rsc_cnt;
-
-       if (!ring_is_rsc_enabled(rx_ring))
-               return;
-
-       rsc_enabled = rx_desc->wb.lower.lo_dword.data &
-                     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
-       /* If this is an RSC frame rsc_cnt should be non-zero */
-       if (!rsc_enabled)
-               return;
-
-       rsc_cnt = le32_to_cpu(rsc_enabled);
-       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
-       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
                                   struct sk_buff *skb)
 {
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 
        prefetch(IXGBE_RX_DESC(rx_ring, ntc));
 
-       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
-               return false;
+       /* update RSC append count if present */
+       if (ring_is_rsc_enabled(rx_ring)) {
+               __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+                                    cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+               if (unlikely(rsc_enabled)) {
+                       u32 rsc_cnt = le32_to_cpu(rsc_enabled);
 
-       /* append_cnt indicates packet is RSC, if so fetch nextp */
-       if (IXGBE_CB(skb)->append_cnt) {
-               ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
-               ntc &= IXGBE_RXDADV_NEXTP_MASK;
-               ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+                       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+                       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
+
+                       /* update ntc based on RSC value */
+                       ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+                       ntc &= IXGBE_RXDADV_NEXTP_MASK;
+                       ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+               }
        }
 
+       /* if we are the last buffer then there is nothing else to do */
+       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+               return false;
+
        /* place skb in next buffer to be received */
        rx_ring->rx_buffer_info[ntc].skb = skb;
        rx_ring->rx_stats.non_eop_descs++;
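
In the ixgbe_is_non_eop() hunk above, the RSC coalesce count is read straight from the descriptor's lower status word. A self-contained sketch of that extraction; the mask and shift are assumed ixgbe_type.h values (RSCCNT in bits 17..20) and are shown only for illustration:

#include <stdio.h>
#include <stdint.h>

#define RXDADV_RSCCNT_MASK  0x001E0000u   /* assumed descriptor layout */
#define RXDADV_RSCCNT_SHIFT 17

/* How many extra descriptors were coalesced into this one, i.e. the
 * amount added to append_cnt in ixgbe_is_non_eop(). */
static uint32_t rsc_append(uint32_t lo_dword)
{
	uint32_t cnt = lo_dword & RXDADV_RSCCNT_MASK;

	if (!cnt)               /* not an RSC descriptor */
		return 0;
	return (cnt >> RXDADV_RSCCNT_SHIFT) - 1;
}

int main(void)
{
	/* e.g. hardware reports 4 coalesced segments */
	printf("append_cnt += %u\n", rsc_append(4u << RXDADV_RSCCNT_SHIFT));
	return 0;
}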
@@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 }
 
 /**
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+                           struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
+
+       /*
+        * it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lowmem pool per
+        * alloc_page(GFP_ATOMIC)
+        */
+       va = skb_frag_address(frag);
+
+       /*
+        * we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+}
+
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+                               struct sk_buff *skb)
+{
+       /* if the page was released unmap it, else just sync our portion */
+       if (unlikely(IXGBE_CB(skb)->page_released)) {
+               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               IXGBE_CB(skb)->page_released = false;
+       } else {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             IXGBE_CB(skb)->dma,
+                                             frag->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+       IXGBE_CB(skb)->dma = 0;
+}
+
+/**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                                  union ixgbe_adv_rx_desc *rx_desc,
                                  struct sk_buff *skb)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        struct net_device *netdev = rx_ring->netdev;
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* if the page was released unmap it, else just sync our portion */
-       if (unlikely(IXGBE_CB(skb)->page_released)) {
-               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               IXGBE_CB(skb)->page_released = false;
-       } else {
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             IXGBE_CB(skb)->dma,
-                                             frag->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
-                                             DMA_FROM_DEVICE);
-       }
-       IXGBE_CB(skb)->dma = 0;
 
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                return true;
        }
 
-       /*
-        * it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /*
-        * we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = skb_frag_size(frag);
-       if (pull_len > IXGBE_RX_HDR_SIZE)
-               pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-
-       /*
-        * if we sucked the frag empty then we should free it,
-        * if there are other frags here something is screwed up in hardware
-        */
-       if (skb_frag_size(frag) == 0) {
-               BUG_ON(skb_shinfo(skb)->nr_frags != 1);
-               skb_shinfo(skb)->nr_frags = 0;
-               __skb_frag_unref(frag);
-               skb->truesize -= ixgbe_rx_bufsz(rx_ring);
-       }
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               ixgbe_pull_tail(rx_ring, skb);
 
 #ifdef IXGBE_FCOE
        /* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 }
 
 /**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
-       struct page *page = rx_buffer->page;
-
-       /* if we are only owner of page and it is local we can reuse it */
-       return likely(page_count(page) == 1) &&
-              likely(page_to_nid(page) == numa_node_id());
-}
-
-/**
  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
  **/
 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *old_buff)
 {
        struct ixgbe_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;
-       u16 bufsz = ixgbe_rx_bufsz(rx_ring);
 
        new_buff = &rx_ring->rx_buffer_info[nta];
 
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
-
-       /* flip page offset to other buffer and store to new_buff */
-       new_buff->page_offset = old_buff->page_offset ^ bufsz;
+       new_buff->page_offset = old_buff->page_offset;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-                                        new_buff->page_offset, bufsz,
+                                        new_buff->page_offset,
+                                        ixgbe_rx_bufsz(rx_ring),
                                         DMA_FROM_DEVICE);
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(new_buff->page);
 }
 
 /**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
- * This function is based on skb_add_rx_frag.  I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
  **/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
-                             struct sk_buff *skb, int size)
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb)
 {
-       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                          rx_buffer->page, rx_buffer->page_offset,
-                          size);
-       skb->len += size;
-       skb->data_len += size;
-       skb->truesize += ixgbe_rx_bufsz(rx_ring);
+       struct page *page = rx_buffer->page;
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+                                  ixgbe_rx_bufsz(rx_ring);
+#endif
+
+       if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+               /* we can reuse buffer as-is, just make sure it is local */
+               if (likely(page_to_nid(page) == numa_node_id()))
+                       return true;
+
+               /* this page cannot be reused so discard it */
+               put_page(page);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(page_to_nid(page) != numa_node_id()))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+
+       /*
+        * since we are the only owner of the page and we need to
+        * increment it, just set the value to 2 in order to avoid
+        * an unnecessary locked operation
+        */
+       atomic_set(&page->_count, 2);
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(page);
+#endif
+
+       return true;
+}
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+                                            union ixgbe_adv_rx_desc *rx_desc)
+{
+       struct ixgbe_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+
+       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) +
+                                 rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                               IXGBE_RX_HDR_SIZE);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       return NULL;
+               }
+
+               /*
+                * we will be copying header into skb->data in
+                * pskb_may_pull so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+
+               /*
+                * Delay unmapping of the first packet. It carries the
+                * header information, HW may still access the header
+                * after the writeback.  Only unmap it when EOP is
+                * reached
+                */
+               if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+                       goto dma_sync;
+
+               IXGBE_CB(skb)->dma = rx_buffer->dma;
+       } else {
+               if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+                       ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+               /* we are reusing so sync this buffer for CPU use */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+
+       /* pull page into skb */
+       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+       } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+               /* the page has been released from the ring */
+               IXGBE_CB(skb)->page_released = true;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                              ixgbe_rx_pg_size(rx_ring),
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->skb = NULL;
+       rx_buffer->dma = 0;
+       rx_buffer->page = NULL;
+
+       return skb;
 }
 
 /**
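
The reworked ixgbe_add_rx_frag() above decides per packet whether the half-page buffer can go back on the ring: small frames are copied into the linear area and the page is reused untouched, larger frames are attached as a frag and the offset flipped to the other half, and reuse is refused for remote-node pages or pages the driver no longer owns exclusively. A simplified userspace model of that decision, assuming 2 KB buffers on 4 KB pages:

#include <stdbool.h>
#include <stdio.h>

struct rx_buffer_model {
	unsigned int page_offset;   /* which half of the page is armed */
	int page_refcount;          /* stand-in for page_count(page) */
	int page_node, local_node;  /* stand-ins for the NUMA node checks */
};

/* Returns true if the buffer would be handed back to the ring,
 * i.e. the case where ixgbe_reuse_rx_page() is called. */
static bool add_rx_frag_model(struct rx_buffer_model *b,
			      unsigned int size, unsigned int hdr_size)
{
	const unsigned int truesize = 2048;

	if (b->page_node != b->local_node)
		return false;            /* never recycle remote pages */

	if (size <= hdr_size)
		return true;             /* copied out, page left untouched */

	if (b->page_refcount != 1)
		return false;            /* someone else still holds the page */

	b->page_offset ^= truesize;      /* arm the other half of the page */
	b->page_refcount = 2;            /* one ref for the stack, one for the ring */
	return true;
}

int main(void)
{
	struct rx_buffer_model b = { 0, 1, 0, 0 };
	bool reuse = add_rx_frag_model(&b, 1500, 256);

	printf("reuse=%d next offset=%u\n", reuse, b.page_offset);
	return 0;
}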
@@ -1658,11 +1790,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
        do {
-               struct ixgbe_rx_buffer *rx_buffer;
                union ixgbe_adv_rx_desc *rx_desc;
                struct sk_buff *skb;
-               struct page *page;
-               u16 ntc;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1799,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        cleaned_count = 0;
                }
 
-               ntc = rx_ring->next_to_clean;
-               rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-               rx_buffer = &rx_ring->rx_buffer_info[ntc];
+               rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
                if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;
@@ -1684,75 +1811,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                rmb();
 
-               page = rx_buffer->page;
-               prefetchw(page);
-
-               skb = rx_buffer->skb;
+               /* retrieve a buffer from the ring */
+               skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
 
-               if (likely(!skb)) {
-                       void *page_addr = page_address(page) +
-                                         rx_buffer->page_offset;
-
-                       /* prefetch first cache line of first page */
-                       prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-                       prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
-                       /* allocate a skb to store the frags */
-                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                       IXGBE_RX_HDR_SIZE);
-                       if (unlikely(!skb)) {
-                               rx_ring->rx_stats.alloc_rx_buff_failed++;
-                               break;
-                       }
-
-                       /*
-                        * we will be copying header into skb->data in
-                        * pskb_may_pull so it is in our interest to prefetch
-                        * it now to avoid a possible cache miss
-                        */
-                       prefetchw(skb->data);
-
-                       /*
-                        * Delay unmapping of the first packet. It carries the
-                        * header information, HW may still access the header
-                        * after the writeback.  Only unmap it when EOP is
-                        * reached
-                        */
-                       IXGBE_CB(skb)->dma = rx_buffer->dma;
-               } else {
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_buffer->dma,
-                                                     rx_buffer->page_offset,
-                                                     ixgbe_rx_bufsz(rx_ring),
-                                                     DMA_FROM_DEVICE);
-               }
-
-               /* pull page into skb */
-               ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
-                                 le16_to_cpu(rx_desc->wb.upper.length));
-
-               if (ixgbe_can_reuse_page(rx_buffer)) {
-                       /* hand second half of page back to the ring */
-                       ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-               } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-                       /* the page has been released from the ring */
-                       IXGBE_CB(skb)->page_released = true;
-               } else {
-                       /* we are not reusing the buffer so unmap it */
-                       dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                                      ixgbe_rx_pg_size(rx_ring),
-                                      DMA_FROM_DEVICE);
-               }
-
-               /* clear contents of buffer_info */
-               rx_buffer->skb = NULL;
-               rx_buffer->dma = 0;
-               rx_buffer->page = NULL;
-
-               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+               /* exit if we failed to retrieve a buffer */
+               if (!skb)
+                       break;
 
                cleaned_count++;
 
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
-       srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
        srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * total size of max desc * buf_len is not greater
         * than 65536
         */
-#if (PAGE_SIZE <= 8192)
        rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE <= 16384)
-       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#else
-       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#endif
        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
@@ -4130,27 +4184,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set.  To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-       struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-       u16 i;
-
-       for (i = 0; i < rx_ring->count; i += 2) {
-               rx_buffer[0].page_offset = 0;
-               rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-               rx_buffer = &rx_buffer[2];
-       }
-}
-
-/**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
  **/
@@ -4195,8 +4228,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
-       ixgbe_init_rx_page_offset(rx_ring);
-
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -4646,8 +4677,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       ixgbe_init_rx_page_offset(rx_ring);
-
        return 0;
 err:
        vfree(rx_ring->rx_buffer_info);
@@ -5874,9 +5903,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-                   !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
-                       return;
+               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
+                       if (unlikely(skb->no_fcs))
+                               first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
+                       if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+                               return;
+               }
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
@@ -5938,7 +5970,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
        /* set type for advanced descriptor with frame checksum insertion */
        __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
-                                     IXGBE_ADVTXD_DCMD_IFCS |
                                      IXGBE_ADVTXD_DCMD_DEXT);
 
        /* set HW vlan bit if vlan is present */
@@ -5958,6 +5989,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 #endif
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
+       /* insert frame checksum */
+       if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
+               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+
        return cmd_type;
 }
 
@@ -6063,8 +6098,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                if (likely(!data_len))
                        break;
 
-               if (unlikely(skb->no_fcs))
-                       cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
                tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
                i++;
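
The TX changes above move the no-FCS decision out of the per-descriptor loop: skb->no_fcs now sets IXGBE_TX_FLAGS_NO_IFCS once, and ixgbe_tx_cmd_type() simply leaves the IFCS command bit out. A minimal sketch of that flag-to-command translation (the DCMD bit positions below are assumptions, not taken from the hardware spec):

#include <stdint.h>
#include <stdio.h>

#define TX_FLAGS_NO_IFCS  (1u << 9)       /* as added in ixgbe.h above */
#define ADVTXD_DCMD_DEXT  (1u << 29)      /* assumed bit positions */
#define ADVTXD_DCMD_IFCS  (1u << 25)

static uint32_t tx_cmd_type_model(uint32_t tx_flags)
{
	uint32_t cmd = ADVTXD_DCMD_DEXT;

	/* insert the Ethernet FCS unless the skb asked us not to */
	if (!(tx_flags & TX_FLAGS_NO_IFCS))
		cmd |= ADVTXD_DCMD_IFCS;
	return cmd;
}

int main(void)
{
	printf("default: IFCS=%d, no_fcs: IFCS=%d\n",
	       !!(tx_cmd_type_model(0) & ADVTXD_DCMD_IFCS),
	       !!(tx_cmd_type_model(TX_FLAGS_NO_IFCS) & ADVTXD_DCMD_IFCS));
	return 0;
}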
index f45def0..876bece 100644 (file)
@@ -3409,7 +3409,7 @@ set_speed:
 
        pause_flags = 0;
        /* setup pause frame */
-       if (np->duplex != 0) {
+       if (netif_running(dev) && (np->duplex != 0)) {
                if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
                        adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                        lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
        regs->version = FORCEDETH_REGS_VER;
        spin_lock_irq(&np->lock);
-       for (i = 0; i <= np->register_size/sizeof(u32); i++)
+       for (i = 0; i < np->register_size/sizeof(u32); i++)
                rbuf[i] = readl(base + i*sizeof(u32));
        spin_unlock_irq(&np->lock);
 }
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
 
        netif_stop_queue(dev);
        spin_lock_irq(&np->lock);
+       nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
        nv_stop_rxtx(dev);
        nv_txrx_reset(dev);
 
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                goto out_error;
        }
 
+       netif_carrier_off(dev);
+
+       /* Some NICs freeze when TX pause is enabled while NIC is
+        * down, and this stays across warm reboots. The sequence
+        * below should be enough to recover from that state.
+        */
+       nv_update_pause(dev, 0);
+       nv_start_tx(dev);
+       nv_stop_tx(dev);
+
        if (id->driver_data & DEV_HAS_VLAN)
                nv_vlan_mode(dev, dev->features);
 
-       netif_carrier_off(dev);
-
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
index 65a8d49..a606db4 100644 (file)
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)               \
        do {                                            \
-               if ((efx->state == STATE_RUNNING) ||    \
+               if ((efx->state == STATE_READY) ||      \
                    (efx->state == STATE_DISABLED))     \
                        ASSERT_RTNL();                  \
        } while (0)
 
+static int efx_check_disabled(struct efx_nic *efx)
+{
+       if (efx->state == STATE_DISABLED) {
+               netif_err(efx, drv, efx->net_dev,
+                         "device is disabled due to earlier errors\n");
+               return -EIO;
+       }
+       return 0;
+}
+
 /**************************************************************************
  *
  * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
        efx->rx_buffer_order = get_order(efx->rx_buffer_len +
                                         sizeof(struct efx_rx_page_state));
 
+       /* We must keep at least one descriptor in a TX ring empty.
+        * We could avoid this when the queue size does not exactly
+        * match the hardware ring size, but it's not that important.
+        * Therefore we stop the queue when one more skb might fill
+        * the ring completely.  We wake it when half way back to
+        * empty.
+        */
+       efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+       efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
        struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
        u32 old_rxq_entries, old_txq_entries;
        unsigned i, next_buffer_table = 0;
-       int rc = 0;
+       int rc;
+
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
 
        /* Not all channels should be reallocated. We must avoid
         * reallocating their buffer table entries.
@@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
        struct efx_channel *channel;
 
+       BUG_ON(efx->state == STATE_DISABLED);
+
        if (efx->legacy_irq)
                efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
@@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
        struct efx_channel *channel;
 
+       if (efx->state == STATE_DISABLED)
+               return;
+
        efx_mcdi_mode_poll(efx);
 
        efx_nic_disable_interrupts(efx);
@@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx)
        return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
  */
 static void efx_start_all(struct efx_nic *efx)
 {
        EFX_ASSERT_RESET_SERIALISED(efx);
+       BUG_ON(efx->state == STATE_DISABLED);
 
        /* Check that it is appropriate to restart the interface. All
         * of these flags are safe to read under just the rtnl lock */
-       if (efx->port_enabled)
-               return;
-       if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
-               return;
-       if (!netif_running(efx->net_dev))
+       if (efx->port_enabled || !netif_running(efx->net_dev))
                return;
 
        efx_start_port(efx);
@@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx)
        cancel_work_sync(&efx->mac_work);
 }
 
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
 static void efx_stop_all(struct efx_nic *efx)
 {
        EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
        struct efx_nic *efx = netdev_priv(net_dev);
        struct mii_ioctl_data *data = if_mii(ifr);
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
        /* Convert phy_id from older PRTAD/DEVAD format */
        if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
            (data->phy_id & 0xfc00) == 0x0400)
@@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev)
 static int efx_net_open(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       EFX_ASSERT_RESET_SERIALISED(efx);
+       int rc;
 
        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());
 
-       if (efx->state == STATE_DISABLED)
-               return -EIO;
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev)
        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());
 
-       if (efx->state != STATE_DISABLED) {
-               /* Stop the device and flush all the channels */
-               efx_stop_all(efx);
-       }
+       /* Stop the device and flush all the channels */
+       efx_stop_all(efx);
 
        return 0;
 }
@@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev)
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;
 
@@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
        mutex_lock(&efx->mac_lock);
-       /* Reconfigure the MAC before enabling the dma queues so that
-        * the RX buffers don't overflow */
        net_dev->mtu = new_mtu;
        efx->type->reconfigure_mac(efx);
        mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
        struct sockaddr *addr = data;
        char *new_addr = addr->sa_data;
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
                          "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        rtnl_lock();
 
+       /* Enable resets to be scheduled and check whether any were
+        * already requested.  If so, the NIC is probably hosed so we
+        * abort.
+        */
+       efx->state = STATE_READY;
+       smp_mb(); /* ensure we change state before checking reset_pending */
+       if (efx->reset_pending) {
+               netif_err(efx, probe, efx->net_dev,
+                         "aborting probe due to scheduled reset\n");
+               rc = -EIO;
+               goto fail_locked;
+       }
+
        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);
 
+       /* Always start with carrier off; PHY events will detect the link */
+       netif_carrier_off(net_dev);
+
        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;
@@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx)
                        efx_init_tx_queue_core_txq(tx_queue);
        }
 
-       /* Always start with carrier off; PHY events will detect the link */
-       netif_carrier_off(net_dev);
-
        rtnl_unlock();
 
        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        return 0;
 
+fail_registered:
+       rtnl_lock();
+       unregister_netdevice(net_dev);
 fail_locked:
+       efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
-
-fail_registered:
-       unregister_netdev(net_dev);
-       return rc;
 }
 
 static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 
        strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
        device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-       unregister_netdev(efx->net_dev);
+
+       rtnl_lock();
+       unregister_netdevice(efx->net_dev);
+       efx->state = STATE_UNINIT;
+       rtnl_unlock();
 }
 
 /**************************************************************************
@@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        EFX_ASSERT_RESET_SERIALISED(efx);
 
        efx_stop_all(efx);
-       mutex_lock(&efx->mac_lock);
-
        efx_stop_interrupts(efx, false);
+
+       mutex_lock(&efx->mac_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
                efx->phy_op->fini(efx);
        efx->type->fini(efx);
@@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data)
        if (!pending)
                return;
 
-       /* If we're not RUNNING then don't reset. Leave the reset_pending
-        * flags set so that efx_pci_probe_main will be retried */
-       if (efx->state != STATE_RUNNING) {
-               netif_info(efx, drv, efx->net_dev,
-                          "scheduled reset quenched. NIC not RUNNING\n");
-               return;
-       }
-
        rtnl_lock();
-       (void)efx_reset(efx, fls(pending) - 1);
+
+       /* We checked the state in efx_schedule_reset() but it may
+        * have changed by now.  Now that we have the RTNL lock,
+        * it cannot change again.
+        */
+       if (efx->state == STATE_READY)
+               (void)efx_reset(efx, fls(pending) - 1);
+
        rtnl_unlock();
 }
 
@@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
        }
 
        set_bit(method, &efx->reset_pending);
+       smp_mb(); /* ensure we change reset_pending before checking state */
+
+       /* If we're not READY then just leave the flags set as the cue
+        * to abort probing or reschedule the reset later.
+        */
+       if (ACCESS_ONCE(efx->state) != STATE_READY)
+               return;
 
        /* efx_process_channel() will no longer read events once a
         * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
 {
        int i;
 
        /* Initialise common structures */
-       memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
        INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
        INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
        efx->pci_dev = pci_dev;
        efx->msg_enable = debug;
-       efx->state = STATE_INIT;
+       efx->state = STATE_UNINIT;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
        efx->net_dev = net_dev;
@@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
                        goto fail;
        }
 
-       efx->type = type;
-
        EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
        /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+       /* Flush reset_work. It can no longer be scheduled since we
+        * are not READY.
+        */
+       BUG_ON(efx->state == STATE_READY);
+       cancel_work_sync(&efx->reset_work);
+
 #ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
        efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
-       efx->state = STATE_FINI;
        dev_close(efx->net_dev);
-
-       /* Allow any queued efx_resets() to complete */
+       efx_stop_interrupts(efx, false);
        rtnl_unlock();
 
-       efx_stop_interrupts(efx, false);
        efx_sriov_fini(efx);
        efx_unregister_netdev(efx);
 
        efx_mtd_remove(efx);
 
-       /* Wait for any scheduled resets to complete. No more will be
-        * scheduled from this point because efx_stop_all() has been
-        * called, we are no longer registered with driverlink, and
-        * the net_device's have been removed. */
-       cancel_work_sync(&efx->reset_work);
-
        efx_pci_remove_main(efx);
 
        efx_fini_io(efx);
@@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
 {
-       const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;
@@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                     EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
-       net_dev->features |= (type->offload_features | NETIF_F_SG |
+       efx = netdev_priv(net_dev);
+       efx->type = (const struct efx_nic_type *) entry->driver_data;
+       net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO |
                              NETIF_F_RXCSUM);
-       if (type->offload_features & NETIF_F_V6_CSUM)
+       if (efx->type->offload_features & NETIF_F_V6_CSUM)
                net_dev->features |= NETIF_F_TSO6;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   NETIF_F_RXCSUM);
        /* All offloads can be toggled */
        net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
-       efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
-       rc = efx_init_struct(efx, type, pci_dev, net_dev);
+       rc = efx_init_struct(efx, pci_dev, net_dev);
        if (rc)
                goto fail1;
 
@@ -2656,28 +2696,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                goto fail2;
 
        rc = efx_pci_probe_main(efx);
-
-       /* Serialise against efx_reset(). No more resets will be
-        * scheduled since efx_stop_all() has been called, and we have
-        * not and never have been registered.
-        */
-       cancel_work_sync(&efx->reset_work);
-
        if (rc)
                goto fail3;
 
-       /* If there was a scheduled reset during probe, the NIC is
-        * probably hosed anyway.
-        */
-       if (efx->reset_pending) {
-               rc = -EIO;
-               goto fail4;
-       }
-
-       /* Switch to the running state before we expose the device to the OS,
-        * so that dev_open()|efx_start_all() will actually start the device */
-       efx->state = STATE_RUNNING;
-
        rc = efx_register_netdev(efx);
        if (rc)
                goto fail4;
@@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-       efx->state = STATE_FINI;
+       rtnl_lock();
 
-       netif_device_detach(efx->net_dev);
+       if (efx->state != STATE_DISABLED) {
+               efx->state = STATE_UNINIT;
 
-       efx_stop_all(efx);
-       efx_stop_interrupts(efx, false);
+               netif_device_detach(efx->net_dev);
+
+               efx_stop_all(efx);
+               efx_stop_interrupts(efx, false);
+       }
+
+       rtnl_unlock();
 
        return 0;
 }
@@ -2731,21 +2758,25 @@ static int efx_pm_thaw(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-       efx->state = STATE_INIT;
+       rtnl_lock();
 
-       efx_start_interrupts(efx, false);
+       if (efx->state != STATE_DISABLED) {
+               efx_start_interrupts(efx, false);
 
-       mutex_lock(&efx->mac_lock);
-       efx->phy_op->reconfigure(efx);
-       mutex_unlock(&efx->mac_lock);
+               mutex_lock(&efx->mac_lock);
+               efx->phy_op->reconfigure(efx);
+               mutex_unlock(&efx->mac_lock);
 
-       efx_start_all(efx);
+               efx_start_all(efx);
 
-       netif_device_attach(efx->net_dev);
+               netif_device_attach(efx->net_dev);
 
-       efx->state = STATE_RUNNING;
+               efx->state = STATE_READY;
 
-       efx->type->resume_wol(efx);
+               efx->type->resume_wol(efx);
+       }
+
+       rtnl_unlock();
 
        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        queue_work(reset_workqueue, &efx->reset_work);
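
The two smp_mb() calls introduced above, in efx_register_netdev() and efx_schedule_reset(), pair up so that when the two paths race, at least one side observes the other's store: either probe sees reset_pending and aborts, or the reset path sees STATE_READY and lets the work run. A userspace model of that store/barrier/load pattern using C11 sequentially consistent atomics (a sketch of the idea, not the driver code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int state_ready;     /* models efx->state == STATE_READY */
static atomic_int reset_pending;   /* models efx->reset_pending != 0 */

static void *probe_thread(void *arg)
{
	(void)arg;
	atomic_store(&state_ready, 1);          /* efx->state = STATE_READY */
	/* seq_cst ordering stands in for the smp_mb() in efx_register_netdev() */
	if (atomic_load(&reset_pending))
		puts("probe: reset already requested, aborting");
	return NULL;
}

static void *reset_thread(void *arg)
{
	(void)arg;
	atomic_store(&reset_pending, 1);        /* set_bit(method, &reset_pending) */
	/* seq_cst ordering stands in for the smp_mb() in efx_schedule_reset() */
	if (atomic_load(&state_ready))
		puts("reset: device READY, scheduling reset work");
	else
		puts("reset: not READY, leaving the flag for the probe path");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, probe_thread, NULL);
	pthread_create(&b, NULL, reset_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}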
index 5faedd8..f8e7e20 100644 (file)
@@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
        if (!efx_tests)
                goto fail;
 
-
-       ASSERT_RTNL();
-       if (efx->state != STATE_RUNNING) {
+       if (efx->state != STATE_READY) {
                rc = -EIO;
                goto fail1;
        }
index 8687a6c..ec1e99d 100644 (file)
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
                new_mode = PHY_MODE_SPECIAL;
        if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
                err = 0;
-       } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+       } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
                err = -EBUSY;
        } else {
                /* Reset the PHY, reconfigure the MAC and enable/disable
index cd9c0a9..7ab1232 100644 (file)
@@ -91,29 +91,31 @@ struct efx_special_buffer {
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *     Set only on the final fragment of a packet; %NULL for all other
- *     fragments.  When this fragment completes, then we can free this
- *     skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *     buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *     freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ *     freed when descriptor completes.
  * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
  * @len: Length of this fragment.
  *     This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
-       const struct sk_buff *skb;
-       struct efx_tso_header *tsoh;
+       union {
+               const struct sk_buff *skb;
+               void *heap_buf;
+       };
        dma_addr_t dma_addr;
+       unsigned short flags;
        unsigned short len;
-       bool continuation;
-       bool unmap_single;
        unsigned short unmap_len;
 };
+#define EFX_TX_BUF_CONT                1       /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB         2       /* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP                4       /* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE  8       /* buffer was mapped with dma_map_single() */
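
With the struct efx_tx_buffer rework above, one union pointer plus a flags word replaces the separate skb/tsoh pointers and the continuation/unmap_single bools; the completion path can then decide how to unmap and what to free from the flags alone. A condensed sketch of that interpretation (names and output are illustrative):

#include <stdio.h>

#define BUF_CONT        1   /* not the last descriptor of a packet */
#define BUF_SKB         2   /* union holds an skb to free */
#define BUF_HEAP        4   /* union holds a kmalloc'd TSO header */
#define BUF_MAP_SINGLE  8   /* unmap with dma_unmap_single(), else _page() */

struct tx_buffer_model {
	union {
		const void *skb;
		void *heap_buf;
	};
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
};

static void complete_buffer(struct tx_buffer_model *b)
{
	if (b->unmap_len)
		printf("unmap %u bytes with %s\n", b->unmap_len,
		       (b->flags & BUF_MAP_SINGLE) ? "dma_unmap_single"
						   : "dma_unmap_page");
	if (b->flags & BUF_SKB)
		puts("free the skb");
	else if (b->flags & BUF_HEAP)
		puts("kfree the heap TSO header");
	b->len = 0;
	b->flags = 0;
	b->unmap_len = 0;
}

int main(void)
{
	static const char fake_skb[1];
	struct tx_buffer_model b = { .skb = fake_skb,
				     .flags = BUF_SKB | BUF_MAP_SINGLE,
				     .len = 64, .unmap_len = 64 };

	complete_buffer(&b);
	return 0;
}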
 
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +135,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
  * @initialised: Has hardware queue been initialised?
@@ -156,9 +159,6 @@ struct efx_tx_buffer {
  *     variable indicates that the queue is full.  This is to
  *     avoid cache-line ping-pong between the xmit path and the
  *     completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- *     that are not in use, and so available for new TSO sends. The list
- *     is protected by the TX queue lock.
  * @tso_bursts: Number of times TSO xmit invoked by kernel
  * @tso_long_headers: Number of packets with headers too long for standard
  *     blocks
@@ -175,6 +175,7 @@ struct efx_tx_queue {
        struct efx_channel *channel;
        struct netdev_queue *core_txq;
        struct efx_tx_buffer *buffer;
+       struct efx_buffer *tsoh_page;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
        bool initialised;
@@ -187,7 +188,6 @@ struct efx_tx_queue {
        unsigned int insert_count ____cacheline_aligned_in_smp;
        unsigned int write_count;
        unsigned int old_read_count;
-       struct efx_tso_header *tso_headers_free;
        unsigned int tso_bursts;
        unsigned int tso_long_headers;
        unsigned int tso_packets;
@@ -430,11 +430,9 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-       STATE_INIT = 0,
-       STATE_RUNNING = 1,
-       STATE_FINI = 2,
-       STATE_DISABLED = 3,
-       STATE_MAX,
+       STATE_UNINIT = 0,       /* device being probed/removed or is frozen */
+       STATE_READY = 1,        /* hardware ready and netdev registered */
+       STATE_DISABLED = 2,     /* device disabled due to hardware errors */
 };
 
 /*
@@ -654,7 +652,7 @@ struct vfdi_status;
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
  * @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
  * @reset_pending: Bitmask for pending resets
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
@@ -664,6 +662,8 @@ struct vfdi_status;
  *     should be allocated for this NIC
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
  * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
  * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
  * @sram_lim_qw: Qword address limit of SRAM
@@ -774,6 +774,9 @@ struct efx_nic {
 
        unsigned rxq_entries;
        unsigned txq_entries;
+       unsigned int txq_stop_thresh;
+       unsigned int txq_wake_thresh;
+
        unsigned tx_dc_base;
        unsigned rx_dc_base;
        unsigned sram_lim_qw;
index 326d799..cdff40b 100644 (file)
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 /**************************************************************************
  *
  * Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
  *
  **************************************************************************/
 
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
                ++tx_queue->write_count;
 
                /* Create TX descriptor ring entry */
+               BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
                EFX_POPULATE_QWORD_4(*txd,
-                                    FSF_AZ_TX_KER_CONT, buffer->continuation,
+                                    FSF_AZ_TX_KER_CONT,
+                                    buffer->flags & EFX_TX_BUF_CONT,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
index 1871343..ebca75e 100644 (file)
 #include "nic.h"
 #include "workarounds.h"
 
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
-               if (buffer->unmap_single)
+               if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
-               buffer->unmap_single = false;
        }
 
-       if (buffer->skb) {
+       if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
-               buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
+       } else if (buffer->flags & EFX_TX_BUF_HEAP) {
+               kfree(buffer->heap_buf);
        }
-}
 
-/**
- * struct efx_tso_header - a DMA mapped buffer for packet headers
- * @next: Linked list of free ones.
- *     The list is protected by the TX queue lock.
- * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
- * @dma_addr: The DMA address of the header below.
- *
- * This controls the memory used for a TSO header.  Use TSOH_DATA()
- * to find the packet header data.  Use TSOH_SIZE() to calculate the
- * total size required for a given packet header length.  TSO headers
- * in the free list are exactly %TSOH_STD_SIZE bytes in size.
- */
-struct efx_tso_header {
-       union {
-               struct efx_tso_header *next;
-               size_t unmap_len;
-       };
-       dma_addr_t dma_addr;
-};
+       buffer->len = 0;
+       buffer->flags = 0;
+}
 
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
-static void efx_fini_tso(struct efx_tx_queue *tx_queue);
-static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
-                              struct efx_tso_header *tsoh);
-
-static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-                         struct efx_tx_buffer *buffer)
-{
-       if (buffer->tsoh) {
-               if (likely(!buffer->tsoh->unmap_len)) {
-                       buffer->tsoh->next = tx_queue->tso_headers_free;
-                       tx_queue->tso_headers_free = buffer->tsoh;
-               } else {
-                       efx_tsoh_heap_free(tx_queue, buffer->tsoh);
-               }
-               buffer->tsoh = NULL;
-       }
-}
-
 
 static inline unsigned
 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
        return max_descs;
 }
 
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+               return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+       else
+               return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
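+
+A quick worked example of the pairing, assuming EFX_TXQ_TYPE_OFFLOAD is the low bit of the queue number (value 1; the constant is defined elsewhere): the checksum-offload and no-offload queues of a channel sit next to each other in the array, so queue 4 pairs with queue 5 and queue 5 pairs back with queue 4.
+
+	/* illustrative, EFX_TXQ_TYPE_OFFLOAD == 1 assumed */
+	partner of queue 4 (bit clear): 4 + 1 = 5
+	partner of queue 5 (bit set):   5 - 1 = 4
+
+Both members of a pair are seen by the net core as one queue, which is why the stop and wake logic below always takes the maximum fill level of the two.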
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+       /* We need to consider both queues that the net core sees as one */
+       struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+       struct efx_nic *efx = txq1->efx;
+       unsigned int fill_level;
+
+       fill_level = max(txq1->insert_count - txq1->old_read_count,
+                        txq2->insert_count - txq2->old_read_count);
+       if (likely(fill_level < efx->txq_stop_thresh))
+               return;
+
+       /* We used the stale old_read_count above, which gives us a
+        * pessimistic estimate of the fill level (which may even
+        * validly be >= efx->txq_entries).  Now try again using
+        * read_count (more likely to be a cache miss).
+        *
+        * If we read read_count and then conditionally stop the
+        * queue, it is possible for the completion path to race with
+        * us and complete all outstanding descriptors in the middle,
+        * after which there will be no more completions to wake it.
+        * Therefore we stop the queue first, then read read_count
+        * (with a memory barrier to ensure the ordering), then
+        * restart the queue if the fill level turns out to be low
+        * enough.
+        */
+       netif_tx_stop_queue(txq1->core_txq);
+       smp_mb();
+       txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+       txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+       fill_level = max(txq1->insert_count - txq1->old_read_count,
+                        txq2->insert_count - txq2->old_read_count);
+       EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+       if (likely(fill_level < efx->txq_stop_thresh)) {
+               smp_mb();
+               if (likely(!efx->loopback_selftest))
+                       netif_tx_start_queue(txq1->core_txq);
+       }
+}
+
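+
+The ordering above is the standard lossless way to pause a TX queue: stop it first, then re-read the consumer index behind a memory barrier, and restart only if the ring turns out to have enough room after all, so a completion racing with the stop can never leave the queue stopped forever. The txq_stop_thresh and txq_wake_thresh fields are initialised elsewhere in this series; a plausible sketch of that setup, assuming the stop threshold simply reserves room for one maximally fragmented skb (the actual assignment is not shown in this hunk):
+
+	/* illustrative: keep space for one worst-case skb, wake at half the stop level */
+	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+With the threshold chosen like this, a transmit that has already been admitted can always complete, which is why efx_enqueue_skb() and efx_enqueue_skb_tso() below now return NETDEV_TX_OK unconditionally instead of NETDEV_TX_BUSY.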
 /*
  * Add a socket buffer to a TX queue
  *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
  * This function is split out from efx_hard_start_xmit to allow the
  * loopback test to direct packets via specific TX queues.
  *
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
-       unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+       unsigned int len, unmap_len = 0, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
-       bool unmap_single;
-       int q_space, i = 0;
-       netdev_tx_t rc = NETDEV_TX_OK;
+       unsigned short dma_flags;
+       int i = 0;
 
        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        return NETDEV_TX_OK;
        }
 
-       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-       q_space = efx->txq_entries - 1 - fill_level;
-
        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
-       unmap_single = true;
+       dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
        /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
                /* Add to TX queue, splitting across DMA boundaries */
                do {
-                       if (unlikely(q_space-- <= 0)) {
-                               /* It might be that completions have
-                                * happened since the xmit path last
-                                * checked.  Update the xmit path's
-                                * copy of read_count.
-                                */
-                               netif_tx_stop_queue(tx_queue->core_txq);
-                               /* This memory barrier protects the
-                                * change of queue state from the access
-                                * of read_count. */
-                               smp_mb();
-                               tx_queue->old_read_count =
-                                       ACCESS_ONCE(tx_queue->read_count);
-                               fill_level = (tx_queue->insert_count
-                                             - tx_queue->old_read_count);
-                               q_space = efx->txq_entries - 1 - fill_level;
-                               if (unlikely(q_space-- <= 0)) {
-                                       rc = NETDEV_TX_BUSY;
-                                       goto unwind;
-                               }
-                               smp_mb();
-                               if (likely(!efx->loopback_selftest))
-                                       netif_tx_start_queue(
-                                               tx_queue->core_txq);
-                       }
-
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
-                       efx_tsoh_free(tx_queue, buffer);
-                       EFX_BUG_ON_PARANOID(buffer->tsoh);
-                       EFX_BUG_ON_PARANOID(buffer->skb);
+                       EFX_BUG_ON_PARANOID(buffer->flags);
                        EFX_BUG_ON_PARANOID(buffer->len);
-                       EFX_BUG_ON_PARANOID(!buffer->continuation);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
                        dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
+                       buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);
 
                /* Transfer ownership of the unmapping to the final buffer */
-               buffer->unmap_single = unmap_single;
+               buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;
 
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
-               unmap_single = false;
+               dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }
 
        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
-       buffer->continuation = false;
+       buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       efx_tx_maybe_stop_queue(tx_queue);
+
        return NETDEV_TX_OK;
 
  dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);
 
- unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->len = 0;
        }
 
        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
-               if (unmap_single)
+               if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                                       DMA_TO_DEVICE);
        }
 
-       return rc;
+       return NETDEV_TX_OK;
 }
 
 /* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                }
 
                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-               buffer->continuation = true;
-               buffer->len = 0;
 
                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -450,6 +423,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
+       struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;
 
        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +431,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
-       /* See if we need to restart the netif queue.  This barrier
-        * separates the update of read_count from the test of the
-        * queue state. */
+       /* See if we need to restart the netif queue.  This memory
+        * barrier ensures that we write read_count (inside
+        * efx_dequeue_buffers()) before reading the queue status.
+        */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
-               fill_level = tx_queue->insert_count - tx_queue->read_count;
-               if (fill_level < EFX_TXQ_THRESHOLD(efx))
+               txq2 = efx_tx_queue_partner(tx_queue);
+               fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+                                txq2->insert_count - txq2->read_count);
+               if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }
 
@@ -480,11 +457,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        }
 }
 
+/* Size of page-based TSO header buffers.  Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE  128
+#define TSOH_PER_PAGE  (PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+       return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
+
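+
+Worked numbers for the sizing above, assuming 4 KiB pages: TSOH_PER_PAGE = 4096 / 128 = 32 headers per page, so a 1024-entry TX ring (ptr_mask + 1 == 1024) needs
+
+	DIV_ROUND_UP(1024, 2 * 32) = 16
+
+header pages, the factor of two coming from the rule that at most every second descriptor can be a TSO header.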
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
-       int i, rc;
+       int rc;
 
        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +492,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
-       for (i = 0; i <= tx_queue->ptr_mask; ++i)
-               tx_queue->buffer[i].continuation = true;
+
+       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+               tx_queue->tsoh_page =
+                       kcalloc(efx_tsoh_page_count(tx_queue),
+                               sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+               if (!tx_queue->tsoh_page) {
+                       rc = -ENOMEM;
+                       goto fail1;
+               }
+       }
 
        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
-               goto fail;
+               goto fail2;
 
        return 0;
 
- fail:
+fail2:
+       kfree(tx_queue->tsoh_page);
+       tx_queue->tsoh_page = NULL;
+fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
@@ -546,8 +549,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->continuation = true;
-               buffer->len = 0;
 
                ++tx_queue->read_count;
        }
@@ -568,13 +569,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
        efx_nic_fini_tx(tx_queue);
 
        efx_release_tx_buffers(tx_queue);
-
-       /* Free up TSO header cache */
-       efx_fini_tso(tx_queue);
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       int i;
+
        if (!tx_queue->buffer)
                return;
 
@@ -582,6 +582,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);
 
+       if (tx_queue->tsoh_page) {
+               for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+                       efx_nic_free_buffer(tx_queue->efx,
+                                           &tx_queue->tsoh_page[i]);
+               kfree(tx_queue->tsoh_page);
+               tx_queue->tsoh_page = NULL;
+       }
+
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
 }
@@ -604,22 +612,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 #define TSOH_OFFSET    NET_IP_ALIGN
 #endif
 
-#define TSOH_BUFFER(tsoh)      ((u8 *)(tsoh + 1) + TSOH_OFFSET)
-
-/* Total size of struct efx_tso_header, buffer and padding */
-#define TSOH_SIZE(hdr_len)                                     \
-       (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
-
-/* Size of blocks on free list.  Larger blocks must be allocated from
- * the heap.
- */
-#define TSOH_STD_SIZE          128
-
 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
-#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
-#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
-#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
-#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
 
 /**
  * struct tso_state - TSO state for an SKB
@@ -631,10 +624,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @in_len: Remaining length in current SKB fragment
  * @unmap_len: Length of SKB fragment
  * @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
  * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
- * @full_packet_size: Number of bytes to put in each outgoing segment
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -651,11 +646,13 @@ struct tso_state {
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
-       bool unmap_single;
+       unsigned short dma_flags;
 
        __be16 protocol;
+       unsigned int ip_off;
+       unsigned int tcp_off;
        unsigned header_len;
-       int full_packet_size;
+       unsigned int ip_base_len;
 };
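
Worked example of the new offset fields for a plain Ethernet + IPv4 + TCP frame with no options (illustrative numbers only):

	ip_off      = 14                    /* after the Ethernet header */
	tcp_off     = 34                    /* 14 + 20-byte IPv4 header */
	header_len  = 34 + 20 = 54          /* tcp_off + TCP header */
	ip_base_len = 54 - 14 = 40          /* IPv4: IP + TCP headers, no payload */

For IPv6 the same field is computed as header_len - tcp_off, i.e. just the TCP header length, because payload_len in the IPv6 header never includes the IPv6 header itself.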
 
 
@@ -687,91 +684,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
        return protocol;
 }
 
-
-/*
- * Allocate a page worth of efx_tso_header structures, and string them
- * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
- */
-static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+                              struct efx_tx_buffer *buffer, unsigned int len)
 {
-       struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-       struct efx_tso_header *tsoh;
-       dma_addr_t dma_addr;
-       u8 *base_kva, *kva;
-
-       base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
-       if (base_kva == NULL) {
-               netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
-                         "Unable to allocate page for TSO headers\n");
-               return -ENOMEM;
-       }
-
-       /* dma_alloc_coherent() allocates pages. */
-       EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
-
-       for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
-               tsoh = (struct efx_tso_header *)kva;
-               tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
-               tsoh->next = tx_queue->tso_headers_free;
-               tx_queue->tso_headers_free = tsoh;
-       }
-
-       return 0;
-}
+       u8 *result;
 
+       EFX_BUG_ON_PARANOID(buffer->len);
+       EFX_BUG_ON_PARANOID(buffer->flags);
+       EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-/* Free up a TSO header, and all others in the same page. */
-static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
-                               struct efx_tso_header *tsoh,
-                               struct device *dma_dev)
-{
-       struct efx_tso_header **p;
-       unsigned long base_kva;
-       dma_addr_t base_dma;
-
-       base_kva = (unsigned long)tsoh & PAGE_MASK;
-       base_dma = tsoh->dma_addr & PAGE_MASK;
-
-       p = &tx_queue->tso_headers_free;
-       while (*p != NULL) {
-               if (((unsigned long)*p & PAGE_MASK) == base_kva)
-                       *p = (*p)->next;
-               else
-                       p = &(*p)->next;
-       }
-
-       dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
-}
+       if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+               unsigned index =
+                       (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+               struct efx_buffer *page_buf =
+                       &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+               unsigned offset =
+                       TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+
+               if (unlikely(!page_buf->addr) &&
+                   efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+                       return NULL;
+
+               result = (u8 *)page_buf->addr + offset;
+               buffer->dma_addr = page_buf->dma_addr + offset;
+               buffer->flags = EFX_TX_BUF_CONT;
+       } else {
+               tx_queue->tso_long_headers++;
 
-static struct efx_tso_header *
-efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
-{
-       struct efx_tso_header *tsoh;
-
-       tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
-       if (unlikely(!tsoh))
-               return NULL;
-
-       tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
-                                       TSOH_BUFFER(tsoh), header_len,
-                                       DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
-                                      tsoh->dma_addr))) {
-               kfree(tsoh);
-               return NULL;
+               buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+               if (unlikely(!buffer->heap_buf))
+                       return NULL;
+               result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+               buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
        }
 
-       tsoh->unmap_len = header_len;
-       return tsoh;
-}
+       buffer->len = len;
 
-static void
-efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
-{
-       dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-                        tsoh->dma_addr, tsoh->unmap_len,
-                        DMA_TO_DEVICE);
-       kfree(tsoh);
+       return result;
 }
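
Worked example of the header-slot arithmetic above, assuming 4 KiB pages (TSOH_PER_PAGE == 32): a header inserted when (insert_count & ptr_mask) == 70 gives

	index  = 70 / 2  = 35
	page   = 35 / 32 = 1                          /* tsoh_page[1] */
	offset = 128 * (35 % 32) + TSOH_OFFSET        /* 384 + TSOH_OFFSET */

Halving the insert pointer is safe because, as noted above efx_tsoh_page_count(), a header descriptor is always followed by at least one payload descriptor, so two live headers can never collide on the same slot.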
 
 /**
@@ -781,47 +730,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
  * @len:               Length of fragment
  * @final_buffer:      The final buffer inserted into the queue
  *
- * Push descriptors onto the TX queue.  Return 0 on success or 1 if
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
  */
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
-                              dma_addr_t dma_addr, unsigned len,
-                              struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+                               dma_addr_t dma_addr, unsigned len,
+                               struct efx_tx_buffer **final_buffer)
 {
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
-       unsigned dma_len, fill_level, insert_ptr;
-       int q_space;
+       unsigned dma_len, insert_ptr;
 
        EFX_BUG_ON_PARANOID(len <= 0);
 
-       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-       /* -1 as there is no way to represent all descriptors used */
-       q_space = efx->txq_entries - 1 - fill_level;
-
        while (1) {
-               if (unlikely(q_space-- <= 0)) {
-                       /* It might be that completions have happened
-                        * since the xmit path last checked.  Update
-                        * the xmit path's copy of read_count.
-                        */
-                       netif_tx_stop_queue(tx_queue->core_txq);
-                       /* This memory barrier protects the change of
-                        * queue state from the access of read_count. */
-                       smp_mb();
-                       tx_queue->old_read_count =
-                               ACCESS_ONCE(tx_queue->read_count);
-                       fill_level = (tx_queue->insert_count
-                                     - tx_queue->old_read_count);
-                       q_space = efx->txq_entries - 1 - fill_level;
-                       if (unlikely(q_space-- <= 0)) {
-                               *final_buffer = NULL;
-                               return 1;
-                       }
-                       smp_mb();
-                       netif_tx_start_queue(tx_queue->core_txq);
-               }
-
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;
@@ -830,12 +751,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                    tx_queue->read_count >=
                                    efx->txq_entries);
 
-               efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               EFX_BUG_ON_PARANOID(!buffer->continuation);
-               EFX_BUG_ON_PARANOID(buffer->tsoh);
+               EFX_BUG_ON_PARANOID(buffer->flags);
 
                buffer->dma_addr = dma_addr;
 
@@ -845,7 +763,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                if (dma_len >= len)
                        break;
 
-               buffer->len = dma_len; /* Don't set the other members */
+               buffer->len = dma_len;
+               buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }
@@ -853,7 +772,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
-       return 0;
 }
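
The loop above still splits a fragment across DMA boundaries, as in the old efx_enqueue_skb() path; only the queue-space bookkeeping has gone. An illustrative split, assuming efx_max_tx_len() caps a descriptor at a 4 KiB boundary (the helper's body is not shown here):

	dma_addr ends in 0x0f00, len = 1000
	1st descriptor: 256 bytes up to the 0x1000 boundary, EFX_TX_BUF_CONT set
	2nd descriptor: the remaining 744 bytes; its flags are filled in by the caller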
 
 
@@ -864,54 +782,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
  * a single fragment, and we know it doesn't cross a page boundary.  It
  * also allows us to not worry about end-of-packet etc.
  */
-static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-                              struct efx_tso_header *tsoh, unsigned len)
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+                             struct efx_tx_buffer *buffer, u8 *header)
 {
-       struct efx_tx_buffer *buffer;
-
-       buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-       efx_tsoh_free(tx_queue, buffer);
-       EFX_BUG_ON_PARANOID(buffer->len);
-       EFX_BUG_ON_PARANOID(buffer->unmap_len);
-       EFX_BUG_ON_PARANOID(buffer->skb);
-       EFX_BUG_ON_PARANOID(!buffer->continuation);
-       EFX_BUG_ON_PARANOID(buffer->tsoh);
-       buffer->len = len;
-       buffer->dma_addr = tsoh->dma_addr;
-       buffer->tsoh = tsoh;
+       if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
+               buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+                                                 header, buffer->len,
+                                                 DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+                                              buffer->dma_addr))) {
+                       kfree(buffer->heap_buf);
+                       buffer->len = 0;
+                       buffer->flags = 0;
+                       return -ENOMEM;
+               }
+               buffer->unmap_len = buffer->len;
+               buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+       }
 
        ++tx_queue->insert_count;
+       return 0;
 }
 
 
-/* Remove descriptors put into a tx_queue. */
+/* Remove buffers put into a tx_queue.  None of the buffers must have
+ * an skb attached.
+ */
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_buffer *buffer;
-       dma_addr_t unmap_addr;
 
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
-               efx_tsoh_free(tx_queue, buffer);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               if (buffer->unmap_len) {
-                       unmap_addr = (buffer->dma_addr + buffer->len -
-                                     buffer->unmap_len);
-                       if (buffer->unmap_single)
-                               dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-                                                unmap_addr, buffer->unmap_len,
-                                                DMA_TO_DEVICE);
-                       else
-                               dma_unmap_page(&tx_queue->efx->pci_dev->dev,
-                                              unmap_addr, buffer->unmap_len,
-                                              DMA_TO_DEVICE);
-                       buffer->unmap_len = 0;
-               }
-               buffer->len = 0;
-               buffer->continuation = true;
+               efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
 }
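
efx_enqueue_unwind() can pass NULL packet and byte counters to efx_dequeue_buffer() only because, as the comment above says, no buffer on the unwind path ever has EFX_TX_BUF_SKB set: the skb is attached last, after every descriptor has been inserted successfully, so the counting branch is never reached here. A debug assertion one could add to make that explicit (illustrative, not part of the patch):

	EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_SKB);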
 
@@ -919,17 +825,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 /* Parse the SKB header and initialise state. */
 static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
-       /* All ethernet/IP/TCP headers combined size is TCP header size
-        * plus offset of TCP header relative to start of packet.
-        */
-       st->header_len = ((tcp_hdr(skb)->doff << 2u)
-                         + PTR_DIFF(tcp_hdr(skb), skb->data));
-       st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
-
-       if (st->protocol == htons(ETH_P_IP))
+       st->ip_off = skb_network_header(skb) - skb->data;
+       st->tcp_off = skb_transport_header(skb) - skb->data;
+       st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       if (st->protocol == htons(ETH_P_IP)) {
+               st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
-       else
+       } else {
+               st->ip_base_len = st->header_len - st->tcp_off;
                st->ipv4_id = 0;
+       }
        st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +843,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 
        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
-       st->unmap_single = false;
+       st->dma_flags = 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +852,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = false;
+               st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
@@ -965,7 +870,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
                                        len, DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = true;
+               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
@@ -982,20 +887,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
  * @st:                        TSO state
  *
  * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
  */
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-                                        const struct sk_buff *skb,
-                                        struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+                                         const struct sk_buff *skb,
+                                         struct tso_state *st)
 {
        struct efx_tx_buffer *buffer;
-       int n, end_of_packet, rc;
+       int n;
 
        if (st->in_len == 0)
-               return 0;
+               return;
        if (st->packet_space == 0)
-               return 0;
+               return;
 
        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +910,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
        st->out_len -= n;
        st->in_len -= n;
 
-       rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-       if (likely(rc == 0)) {
-               if (st->out_len == 0)
-                       /* Transfer ownership of the skb */
-                       buffer->skb = skb;
+       efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
 
-               end_of_packet = st->out_len == 0 || st->packet_space == 0;
-               buffer->continuation = !end_of_packet;
+       if (st->out_len == 0) {
+               /* Transfer ownership of the skb */
+               buffer->skb = skb;
+               buffer->flags = EFX_TX_BUF_SKB;
+       } else if (st->packet_space != 0) {
+               buffer->flags = EFX_TX_BUF_CONT;
+       }
 
-               if (st->in_len == 0) {
-                       /* Transfer ownership of the DMA mapping */
-                       buffer->unmap_len = st->unmap_len;
-                       buffer->unmap_single = st->unmap_single;
-                       st->unmap_len = 0;
-               }
+       if (st->in_len == 0) {
+               /* Transfer ownership of the DMA mapping */
+               buffer->unmap_len = st->unmap_len;
+               buffer->flags |= st->dma_flags;
+               st->unmap_len = 0;
        }
 
        st->dma_addr += n;
-       return rc;
 }
 
 
@@ -1035,36 +938,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
  * @st:                        TSO state
  *
  * Generate a new header and prepare for the new packet.  Return 0 on
- * success, or -1 if failed to alloc header.
+ * success, or -%ENOMEM if failed to alloc header.
  */
 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
 {
-       struct efx_tso_header *tsoh;
+       struct efx_tx_buffer *buffer =
+               &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;
+       int rc;
 
-       /* Allocate a DMA-mapped header buffer. */
-       if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
-               if (tx_queue->tso_headers_free == NULL) {
-                       if (efx_tsoh_block_alloc(tx_queue))
-                               return -1;
-               }
-               EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
-               tsoh = tx_queue->tso_headers_free;
-               tx_queue->tso_headers_free = tsoh->next;
-               tsoh->unmap_len = 0;
-       } else {
-               tx_queue->tso_long_headers++;
-               tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
-               if (unlikely(!tsoh))
-                       return -1;
-       }
+       /* Allocate and insert a DMA-mapped header buffer. */
+       header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+       if (!header)
+               return -ENOMEM;
 
-       header = TSOH_BUFFER(tsoh);
-       tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+       tsoh_th = (struct tcphdr *)(header + st->tcp_off);
 
        /* Copy and update the headers. */
        memcpy(header, skb->data, st->header_len);
@@ -1073,19 +965,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
-               ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+               st->packet_space = skb_shinfo(skb)->gso_size;
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
-               ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+               st->packet_space = st->out_len;
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }
+       ip_length = st->ip_base_len + st->packet_space;
 
        if (st->protocol == htons(ETH_P_IP)) {
-               struct iphdr *tsoh_iph =
-                       (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+               struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
 
                tsoh_iph->tot_len = htons(ip_length);
 
@@ -1094,16 +986,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                st->ipv4_id++;
        } else {
                struct ipv6hdr *tsoh_iph =
-                       (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+                       (struct ipv6hdr *)(header + st->ip_off);
 
-               tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+               tsoh_iph->payload_len = htons(ip_length);
        }
 
-       st->packet_space = skb_shinfo(skb)->gso_size;
-       ++tx_queue->tso_packets;
+       rc = efx_tso_put_header(tx_queue, buffer, header);
+       if (unlikely(rc))
+               return rc;
 
-       /* Form a descriptor for this header. */
-       efx_tso_put_header(tx_queue, tsoh, st->header_len);
+       ++tx_queue->tso_packets;
 
        return 0;
 }
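
Continuing the illustrative IPv4 numbers from the tso_state example (20-byte IP and TCP headers, gso_size of 1460): every non-final segment gets ip_length = 40 + 1460 = 1500, which is exactly the tot_len the IPv4 header needs. For IPv6, ip_base_len holds only the 20-byte TCP header, so payload_len = 20 + the segment payload; the old "- sizeof(*tsoh_iph)" correction is no longer needed because the IPv6 header was never counted in ip_base_len.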
@@ -1118,13 +1010,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
  *
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
 {
        struct efx_nic *efx = tx_queue->efx;
-       int frag_i, rc, rc2 = NETDEV_TX_OK;
+       int frag_i, rc;
        struct tso_state state;
 
        /* Find the packet protocol and sanity-check it */
@@ -1156,11 +1048,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                goto mem_err;
 
        while (1) {
-               rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-               if (unlikely(rc)) {
-                       rc2 = NETDEV_TX_BUSY;
-                       goto unwind;
-               }
+               tso_fill_packet_with_fragment(tx_queue, skb, &state);
 
                /* Move onto the next fragment? */
                if (state.in_len == 0) {
@@ -1184,6 +1072,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       efx_tx_maybe_stop_queue(tx_queue);
+
        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;
 
@@ -1192,10 +1082,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                  "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);
 
- unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
-               if (state.unmap_single)
+               if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
@@ -1204,25 +1093,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        }
 
        efx_enqueue_unwind(tx_queue);
-       return rc2;
-}
-
-
-/*
- * Free up all TSO datastructures associated with tx_queue. This
- * routine should be called only once the tx_queue is both empty and
- * will no longer be used.
- */
-static void efx_fini_tso(struct efx_tx_queue *tx_queue)
-{
-       unsigned i;
-
-       if (tx_queue->buffer) {
-               for (i = 0; i <= tx_queue->ptr_mask; ++i)
-                       efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
-       }
-
-       while (tx_queue->tso_headers_free != NULL)
-               efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-                                   &tx_queue->efx->pci_dev->dev);
+       return NETDEV_TX_OK;
 }
index 1b173a6..b26cbda 100644
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
 
 config TI_DAVINCI_MDIO
        tristate "TI DaVinci MDIO Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
        select PHYLIB
        ---help---
          This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
 
 config TI_DAVINCI_CPDMA
        tristate "TI DaVinci CPDMA Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
        ---help---
          This driver supports TI's DaVinci CPDMA dma engine.
 
index 1e5d85b..0cbc0e5 100644
@@ -28,6 +28,9 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
 
 #include <linux/platform_data/cpsw.h>
 
@@ -709,6 +712,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
        slave->sliver   = regs + data->sliver_reg_ofs;
 }
 
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+                        struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct device_node *slave_node;
+       int i = 0, ret;
+       u32 prop;
+
+       if (!node)
+               return -EINVAL;
+
+       if (of_property_read_u32(node, "slaves", &prop)) {
+               pr_err("Missing slaves property in the DT.\n");
+               return -EINVAL;
+       }
+       data->slaves = prop;
+
+       data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+                                  data->slaves, GFP_KERNEL);
+       if (!data->slave_data) {
+               pr_err("Could not allocate slave memory.\n");
+               return -EINVAL;
+       }
+
+       data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
+
+       if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+               pr_err("Missing cpdma_channels property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->channels = prop;
+
+       if (of_property_read_u32(node, "host_port_no", &prop)) {
+               pr_err("Missing host_port_no property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->host_port_num = prop;
+
+       if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
+               pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpdma_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
+               pr_err("Missing cpdma_sram_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpdma_sram_ofs = prop;
+
+       if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
+               pr_err("Missing ale_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->ale_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "ale_entries", &prop)) {
+               pr_err("Missing ale_entries property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->ale_entries = prop;
+
+       if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
+               pr_err("Missing host_port_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->host_port_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
+               pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->hw_stats_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
+               pr_err("Missing bd_ram_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->bd_ram_ofs = prop;
+
+       if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+               pr_err("Missing bd_ram_size property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->bd_ram_size = prop;
+
+       if (of_property_read_u32(node, "rx_descs", &prop)) {
+               pr_err("Missing rx_descs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->rx_descs = prop;
+
+       if (of_property_read_u32(node, "mac_control", &prop)) {
+               pr_err("Missing mac_control property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->mac_control = prop;
+
+       for_each_child_of_node(node, slave_node) {
+               struct cpsw_slave_data *slave_data = data->slave_data + i;
+               const char *phy_id = NULL;
+               const void *mac_addr = NULL;
+
+               if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+                       pr_err("Missing slave[%d] phy_id property\n", i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->phy_id = phy_id;
+
+               if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
+                       pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->slave_reg_ofs = prop;
+
+               if (of_property_read_u32(slave_node, "sliver_reg_ofs",
+                                        &prop)) {
+                       pr_err("Missing slave[%d] sliver_reg_ofs property\n",
+                               i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->sliver_reg_ofs = prop;
+
+               mac_addr = of_get_mac_address(slave_node);
+               if (mac_addr)
+                       memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+
+               i++;
+       }
+
+       return 0;
+
+error_ret:
+       kfree(data->slave_data);
+       return ret;
+}
+
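+
+Every required property above is read with the same of_property_read_u32(), pr_err(), goto error_ret sequence. A hedged sketch of how that repeated pattern works in isolation; the helper name is made up here and is not part of the patch:
+
+	/* illustrative helper, not in the patch: read a required u32 DT property */
+	static int cpsw_read_u32_prop(struct device_node *node, const char *name,
+				      u32 *out)
+	{
+		if (of_property_read_u32(node, name, out)) {
+			pr_err("Missing %s property in the DT.\n", name);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+For example, cpsw_read_u32_prop(node, "bd_ram_size", &prop) would stand in for one of the blocks above; the property names themselves ("slaves", "cpdma_channels", "rx_descs", ...) come straight from the parser above.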
 static int __devinit cpsw_probe(struct platform_device *pdev)
 {
        struct cpsw_platform_data       *data = pdev->dev.platform_data;
@@ -720,11 +875,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        struct resource                 *res;
        int ret = 0, i, k = 0;
 
-       if (!data) {
-               pr_err("platform data missing\n");
-               return -ENODEV;
-       }
-
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
                pr_err("error allocating net_device\n");
@@ -734,13 +884,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ndev);
        priv = netdev_priv(ndev);
        spin_lock_init(&priv->lock);
-       priv->data = *data;
        priv->pdev = pdev;
        priv->ndev = ndev;
        priv->dev  = &ndev->dev;
        priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
        priv->rx_packet_max = max(rx_packet_max, 128);
 
+       if (cpsw_probe_dt(&priv->data, pdev)) {
+               pr_err("cpsw: platform data missing\n");
+               ret = -ENODEV;
+               goto clean_ndev_ret;
+       }
+       data = &priv->data;
+
        if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
                memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
                pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1152,17 @@ static const struct dev_pm_ops cpsw_pm_ops = {
        .resume         = cpsw_resume,
 };
 
+static const struct of_device_id cpsw_of_mtable[] = {
+       { .compatible = "ti,cpsw", },
+       { /* sentinel */ },
+};
+
 static struct platform_driver cpsw_driver = {
        .driver = {
                .name    = "cpsw",
                .owner   = THIS_MODULE,
                .pm      = &cpsw_pm_ops,
+               .of_match_table = of_match_ptr(cpsw_of_mtable),
        },
        .probe = cpsw_probe,
        .remove = __devexit_p(cpsw_remove),
index cd7ee20..573f3be 100644
@@ -36,6 +36,8 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 /*
  * This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
        return 0;
 }
 
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+                        struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       u32 prop;
+
+       if (!node)
+               return -EINVAL;
+
+       if (of_property_read_u32(node, "bus_freq", &prop)) {
+               pr_err("Missing bus_freq property in the DT.\n");
+               return -EINVAL;
+       }
+       data->bus_freq = prop;
+
+       return 0;
+}
+
+
 static int __devinit davinci_mdio_probe(struct platform_device *pdev)
 {
        struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       data->pdata = pdata ? (*pdata) : default_pdata;
-
        data->bus = mdiobus_alloc();
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
                goto bail_out;
        }
 
+       if (dev->of_node) {
+               if (davinci_mdio_probe_dt(&data->pdata, pdev))
+                       data->pdata = default_pdata;
+               snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+       } else {
+               data->pdata = pdata ? (*pdata) : default_pdata;
+               snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+                        pdev->name, pdev->id);
+       }
+
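+
+The bus id matters beyond logging: PHY devices on the bus are named from it (phylib's usual format is the bus id plus a two-digit address), and the phy_id strings in the cpsw slave nodes have to match. Illustrative names, assuming pdev->name is "davinci_mdio" and a PHY at address 0:
+
+	DT boot:             "davinci_mdio:00"
+	platform-data boot:  "davinci_mdio-0:00"     /* pdev->id == 0 */
+
+The DT path drops the "-%x" suffix because an OF-probed platform device does not get a meaningful numeric id.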
        data->bus->name         = dev_name(dev);
        data->bus->read         = davinci_mdio_read,
        data->bus->write        = davinci_mdio_write,
        data->bus->reset        = davinci_mdio_reset,
        data->bus->parent       = dev;
        data->bus->priv         = data;
-       snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               pdev->name, pdev->id);
 
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
@@ -454,11 +481,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
        .resume         = davinci_mdio_resume,
 };
 
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+       { .compatible = "ti,davinci_mdio", },
+       { /* sentinel */ },
+};
+
 static struct platform_driver davinci_mdio_driver = {
        .driver = {
                .name    = "davinci_mdio",
                .owner   = THIS_MODULE,
                .pm      = &davinci_mdio_pm_ops,
+               .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
        },
        .probe = davinci_mdio_probe,
        .remove = __devexit_p(davinci_mdio_remove),
index 277c93e..8fa947a 100644
@@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev)
                }
 
                data->rxskbs[i] = skb;
-               data->rxskbs[i] = skb;
                data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
                data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
        }
index a5826a3..2c08bf6 100644
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
        if (data && is_valid_ether_addr(data->mac_addr)) {
                memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
        } else {
-               eth_random_addr(ndev->dev_addr);
-               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+               eth_hw_addr_random(ndev);
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index bdd8891..88943d9 100644
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
        if (data && is_valid_ether_addr(data->mac_addr)) {
                memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
        } else {
-               eth_random_addr(ndev->dev_addr);
-               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+               eth_hw_addr_random(ndev);
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 8c5a1c4..e91111a 100644
@@ -400,7 +400,7 @@ static void netvsc_send_garp(struct work_struct *w)
        ndev_ctx = container_of(w, struct net_device_context, dwork.work);
        net_device = hv_get_drvdata(ndev_ctx->device_ctx);
        net = net_device->ndev;
-       netif_notify_peers(net);
+       netdev_notify_peers(net);
 }
 
 
index 1e88a10..06f8601 100644
@@ -46,8 +46,14 @@ struct rndis_request {
        /* Simplify allocation by having a netvsc packet inline */
        struct hv_netvsc_packet pkt;
        struct hv_page_buffer buf;
-       /* FIXME: We assumed a fixed size request here. */
+
        struct rndis_message request_msg;
+       /*
+        * The buffer for the extended info after the RNDIS message. It's
+        * referenced based on the data offset in the RNDIS message. Its size
+        * is enough for current needs, and should be sufficient for the near
+        * future.
+        */
        u8 ext[100];
 };
 
similarity index 98%
rename from drivers/ieee802154/at86rf230.c
rename to drivers/net/ieee802154/at86rf230.c
index 5d30940..ba753d8 100644
@@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
        .resume     = at86rf230_resume,
 };
 
-static int __init at86rf230_init(void)
-{
-       return spi_register_driver(&at86rf230_driver);
-}
-module_init(at86rf230_init);
-
-static void __exit at86rf230_exit(void)
-{
-       spi_unregister_driver(&at86rf230_driver);
-}
-module_exit(at86rf230_exit);
+module_spi_driver(at86rf230_driver);
 
 MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
 MODULE_LICENSE("GPL v2");
similarity index 99%
rename from drivers/ieee802154/fakehard.c
rename to drivers/net/ieee802154/fakehard.c
index 73d4531..7d39add 100644
@@ -446,4 +446,3 @@ static __exit void fake_exit(void)
 module_init(fake_init);
 module_exit(fake_exit);
 MODULE_LICENSE("GPL");
-
index e2a06fd..4a075ba 100644
@@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net)
        if (err)
                goto out_free_netdev;
 
+       BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
        net->loopback_dev = dev;
        return 0;
 
index 3090dc6..983bbf4 100644
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
          several child MDIO busses to a parent bus.  Child bus
          selection is under the control of GPIO lines.
 
+config MDIO_BUS_MUX_MMIOREG
+       tristate "Support for MMIO device-controlled MDIO bus multiplexers"
+       depends on OF_MDIO
+       select MDIO_BUS_MUX
+       help
+         This module provides a driver for MDIO bus multiplexers that
+         are controlled via a simple memory-mapped device, like an FPGA.
+         The multiplexer connects one of several child MDIO busses to a
+         parent bus.  Child bus selection is under the control of one of
+         the FPGA's registers.
+
+         Currently, only 8-bit registers are supported.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
index 6d2dc6c..426674d 100644
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
 obj-$(CONFIG_AMD_PHY)          += amd.o
 obj-$(CONFIG_MDIO_BUS_MUX)     += mdio-mux.o
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)        += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
index 7189adf..899274f 100644
 #include <linux/gpio.h>
 #include <linux/mdio-gpio.h>
 
-#ifdef CONFIG_OF_GPIO
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#endif
 
 struct mdio_gpio_info {
        struct mdiobb_ctrl ctrl;
        int mdc, mdio;
 };
 
+static void *mdio_gpio_of_get_data(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct mdio_gpio_platform_data *pdata;
+       int ret;
+
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       ret = of_get_gpio(np, 0);
+       if (ret < 0)
+               return NULL;
+
+       pdata->mdc = ret;
+
+       ret = of_get_gpio(np, 1);
+       if (ret < 0)
+               return NULL;
+       pdata->mdio = ret;
+
+       return pdata;
+}
+
 static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
 {
        struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
 
 static int __devinit mdio_gpio_probe(struct platform_device *pdev)
 {
-       struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data;
+       struct mdio_gpio_platform_data *pdata;
        struct mii_bus *new_bus;
        int ret;
 
+       if (pdev->dev.of_node)
+               pdata = mdio_gpio_of_get_data(pdev);
+       else
+               pdata = pdev->dev.platform_data;
+
        if (!pdata)
                return -ENODEV;
 
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
        if (!new_bus)
                return -ENODEV;
 
-       ret = mdiobus_register(new_bus);
+       if (pdev->dev.of_node)
+               ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+       else
+               ret = mdiobus_register(new_bus);
+
        if (ret)
                mdio_gpio_bus_deinit(&pdev->dev);
 
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF_GPIO
-
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
-{
-       struct mdio_gpio_platform_data *pdata;
-       struct mii_bus *new_bus;
-       int ret;
-
-       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return -ENOMEM;
-
-       ret = of_get_gpio(ofdev->dev.of_node, 0);
-       if (ret < 0)
-               goto out_free;
-       pdata->mdc = ret;
-
-       ret = of_get_gpio(ofdev->dev.of_node, 1);
-       if (ret < 0)
-               goto out_free;
-       pdata->mdio = ret;
-
-       new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
-       if (!new_bus)
-               goto out_free;
-
-       ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
-       if (ret)
-               mdio_gpio_bus_deinit(&ofdev->dev);
-
-       return ret;
-
-out_free:
-       kfree(pdata);
-       return -ENODEV;
-}
-
-static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
-{
-       mdio_gpio_bus_destroy(&ofdev->dev);
-       kfree(ofdev->dev.platform_data);
-
-       return 0;
-}
-
-static struct of_device_id mdio_ofgpio_match[] = {
-       {
-               .compatible = "virtual,mdio-gpio",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
-
-static struct platform_driver mdio_ofgpio_driver = {
-       .driver = {
-               .name = "mdio-ofgpio",
-               .owner = THIS_MODULE,
-               .of_match_table = mdio_ofgpio_match,
-       },
-       .probe = mdio_ofgpio_probe,
-       .remove = __devexit_p(mdio_ofgpio_remove),
+static struct of_device_id mdio_gpio_of_match[] = {
+       { .compatible = "virtual,mdio-gpio", },
+       { /* sentinel */ }
 };
 
-static inline int __init mdio_ofgpio_init(void)
-{
-       return platform_driver_register(&mdio_ofgpio_driver);
-}
-
-static inline void mdio_ofgpio_exit(void)
-{
-       platform_driver_unregister(&mdio_ofgpio_driver);
-}
-#else
-static inline int __init mdio_ofgpio_init(void) { return 0; }
-static inline void mdio_ofgpio_exit(void) { }
-#endif /* CONFIG_OF_GPIO */
-
 static struct platform_driver mdio_gpio_driver = {
        .probe = mdio_gpio_probe,
        .remove = __devexit_p(mdio_gpio_remove),
        .driver         = {
                .name   = "mdio-gpio",
                .owner  = THIS_MODULE,
+               .of_match_table = mdio_gpio_of_match,
        },
 };
 
 static int __init mdio_gpio_init(void)
 {
-       int ret;
-
-       ret = mdio_ofgpio_init();
-       if (ret)
-               return ret;
-
-       ret = platform_driver_register(&mdio_gpio_driver);
-       if (ret)
-               mdio_ofgpio_exit();
-
-       return ret;
+       return platform_driver_register(&mdio_gpio_driver);
 }
 module_init(mdio_gpio_init);
 
 static void __exit mdio_gpio_exit(void)
 {
        platform_driver_unregister(&mdio_gpio_driver);
-       mdio_ofgpio_exit();
 }
 module_exit(mdio_gpio_exit);
 
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644 (file)
index 0000000..098239a
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Simple memory-mapped device MDIO MUX driver
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+
+struct mdio_mux_mmioreg_state {
+       void *mux_handle;
+       phys_addr_t phys;
+       uint8_t mask;
+};
+
+/*
+ * MDIO multiplexing switch function
+ *
+ * This function is called by the mdio-mux layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ *
+ * 'current_child' is the current value of the mux register (masked via
+ * s->mask).
+ *
+ * 'desired_child' is the value of the 'reg' property of the target child MDIO
+ * node.
+ *
+ * The first time this function is called, current_child == -1.
+ *
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
+                                     void *data)
+{
+       struct mdio_mux_mmioreg_state *s = data;
+
+       if (current_child ^ desired_child) {
+               void *p = ioremap(s->phys, 1);
+               uint8_t x, y;
+
+               if (!p)
+                       return -ENOMEM;
+
+               x = ioread8(p);
+               y = (x & ~s->mask) | desired_child;
+               if (x != y) {
+                       iowrite8((x & ~s->mask) | desired_child, p);
+                       pr_debug("%s: %02x -> %02x\n", __func__, x, y);
+               }
+
+               iounmap(p);
+       }
+
+       return 0;
+}
+
+static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
+{
+       struct device_node *np2, *np = pdev->dev.of_node;
+       struct mdio_mux_mmioreg_state *s;
+       struct resource res;
+       const __be32 *iprop;
+       int len, ret;
+
+       dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
+
+       s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
+                       np->full_name);
+               return ret;
+       }
+       s->phys = res.start;
+
+       if (resource_size(&res) != sizeof(uint8_t)) {
+               dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+               return -EINVAL;
+       }
+
+       iprop = of_get_property(np, "mux-mask", &len);
+       if (!iprop || len != sizeof(uint32_t)) {
+               dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
+               return -ENODEV;
+       }
+       if (be32_to_cpup(iprop) > 255) {
+               dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+               return -EINVAL;
+       }
+       s->mask = be32_to_cpup(iprop);
+
+       /*
+        * Verify that the 'reg' property of each child MDIO bus does not
+        * set any bits outside of the 'mask'.
+        */
+       for_each_available_child_of_node(np, np2) {
+               iprop = of_get_property(np2, "reg", &len);
+               if (!iprop || len != sizeof(uint32_t)) {
+                       dev_err(&pdev->dev, "mdio-mux child node %s is "
+                               "missing a 'reg' property\n", np2->full_name);
+                       return -ENODEV;
+               }
+               if (be32_to_cpup(iprop) & ~s->mask) {
+                       dev_err(&pdev->dev, "mdio-mux child node %s has "
+                               "a 'reg' value with unmasked bits\n",
+                               np2->full_name);
+                       return -ENODEV;
+               }
+       }
+
+       ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
+                           &s->mux_handle, s);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
+                       np->full_name);
+               return ret;
+       }
+
+       pdev->dev.platform_data = s;
+
+       return 0;
+}
+
+static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
+{
+       struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
+
+       mdio_mux_uninit(s->mux_handle);
+
+       return 0;
+}
+
+static struct of_device_id mdio_mux_mmioreg_match[] = {
+       {
+               .compatible = "mdio-mux-mmioreg",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
+
+static struct platform_driver mdio_mux_mmioreg_driver = {
+       .driver = {
+               .name           = "mdio-mux-mmioreg",
+               .owner          = THIS_MODULE,
+               .of_match_table = mdio_mux_mmioreg_match,
+       },
+       .probe          = mdio_mux_mmioreg_probe,
+       .remove         = __devexit_p(mdio_mux_mmioreg_remove),
+};
+
+module_platform_driver(mdio_mux_mmioreg_driver);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
+MODULE_LICENSE("GPL v2");
index 7ca2ff9..ef9ea92 100644 (file)
@@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
        bus->write(bus, addr, MII_MMD_DATA, data);
 }
 
-static u32 phy_eee_to_adv(u16 eee_adv)
-{
-       u32 adv = 0;
-
-       if (eee_adv & MDIO_EEE_100TX)
-               adv |= ADVERTISED_100baseT_Full;
-       if (eee_adv & MDIO_EEE_1000T)
-               adv |= ADVERTISED_1000baseT_Full;
-       if (eee_adv & MDIO_EEE_10GT)
-               adv |= ADVERTISED_10000baseT_Full;
-       if (eee_adv & MDIO_EEE_1000KX)
-               adv |= ADVERTISED_1000baseKX_Full;
-       if (eee_adv & MDIO_EEE_10GKX4)
-               adv |= ADVERTISED_10000baseKX4_Full;
-       if (eee_adv & MDIO_EEE_10GKR)
-               adv |= ADVERTISED_10000baseKR_Full;
-
-       return adv;
-}
-
-static u32 phy_eee_to_supported(u16 eee_caported)
-{
-       u32 supported = 0;
-
-       if (eee_caported & MDIO_EEE_100TX)
-               supported |= SUPPORTED_100baseT_Full;
-       if (eee_caported & MDIO_EEE_1000T)
-               supported |= SUPPORTED_1000baseT_Full;
-       if (eee_caported & MDIO_EEE_10GT)
-               supported |= SUPPORTED_10000baseT_Full;
-       if (eee_caported & MDIO_EEE_1000KX)
-               supported |= SUPPORTED_1000baseKX_Full;
-       if (eee_caported & MDIO_EEE_10GKX4)
-               supported |= SUPPORTED_10000baseKX4_Full;
-       if (eee_caported & MDIO_EEE_10GKR)
-               supported |= SUPPORTED_10000baseKR_Full;
-
-       return supported;
-}
-
-static u16 phy_adv_to_eee(u32 adv)
-{
-       u16 reg = 0;
-
-       if (adv & ADVERTISED_100baseT_Full)
-               reg |= MDIO_EEE_100TX;
-       if (adv & ADVERTISED_1000baseT_Full)
-               reg |= MDIO_EEE_1000T;
-       if (adv & ADVERTISED_10000baseT_Full)
-               reg |= MDIO_EEE_10GT;
-       if (adv & ADVERTISED_1000baseKX_Full)
-               reg |= MDIO_EEE_1000KX;
-       if (adv & ADVERTISED_10000baseKX4_Full)
-               reg |= MDIO_EEE_10GKX4;
-       if (adv & ADVERTISED_10000baseKR_Full)
-               reg |= MDIO_EEE_10GKR;
-
-       return reg;
-}
-
 /**
  * phy_init_eee - init and check the EEE feature
  * @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                if (eee_cap < 0)
                        return eee_cap;
 
-               cap = phy_eee_to_supported(eee_cap);
+               cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
                        goto eee_exit;
 
@@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                if (eee_adv < 0)
                        return eee_adv;
 
-               adv = phy_eee_to_adv(eee_adv);
-               lp = phy_eee_to_adv(eee_lp);
+               adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+               lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if ((lp & adv & settings[idx].setting))
                        goto eee_exit;
@@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
                                    MDIO_MMD_PCS, phydev->addr);
        if (val < 0)
                return val;
-       data->supported = phy_eee_to_supported(val);
+       data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
        /* Get advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
-       data->advertised = phy_eee_to_adv(val);
+       data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        /* Get LP advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
-       data->lp_advertised = phy_eee_to_adv(val);
+       data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        return 0;
 }
@@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
        int val;
 
-       val = phy_adv_to_eee(data->advertised);
+       val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
        phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
                               phydev->addr, val);
 
index 5c05572..eb3f5ce 100644 (file)
@@ -94,6 +94,18 @@ struct ppp_file {
 #define PF_TO_CHANNEL(pf)      PF_TO_X(pf, struct channel)
 
 /*
+ * Data structure to hold primary network stats for which
+ * we want to use 64 bit storage.  Other network stats
+ * are stored in dev->stats of the ppp structure.

+ */
+struct ppp_link_stats {
+       u64 rx_packets;
+       u64 tx_packets;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
+/*
  * Data structure describing one ppp unit.
  * A ppp unit corresponds to a ppp network interface device
  * and represents a multilink bundle.
@@ -136,6 +148,7 @@ struct ppp {
        unsigned pass_len, active_len;
 #endif /* CONFIG_PPP_FILTER */
        struct net      *ppp_net;       /* the net we belong to */
+       struct ppp_link_stats stats64;  /* 64 bit network stats */
 };
 
 /*
@@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return err;
 }
 
+struct rtnl_link_stats64*
+ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+{
+       struct ppp *ppp = netdev_priv(dev);
+
+       ppp_recv_lock(ppp);
+       stats64->rx_packets = ppp->stats64.rx_packets;
+       stats64->rx_bytes   = ppp->stats64.rx_bytes;
+       ppp_recv_unlock(ppp);
+
+       ppp_xmit_lock(ppp);
+       stats64->tx_packets = ppp->stats64.tx_packets;
+       stats64->tx_bytes   = ppp->stats64.tx_bytes;
+       ppp_xmit_unlock(ppp);
+
+       stats64->rx_errors        = dev->stats.rx_errors;
+       stats64->tx_errors        = dev->stats.tx_errors;
+       stats64->rx_dropped       = dev->stats.rx_dropped;
+       stats64->tx_dropped       = dev->stats.tx_dropped;
+       stats64->rx_length_errors = dev->stats.rx_length_errors;
+
+       return stats64;
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
-       .ndo_start_xmit = ppp_start_xmit,
-       .ndo_do_ioctl   = ppp_net_ioctl,
+       .ndo_start_xmit  = ppp_start_xmit,
+       .ndo_do_ioctl    = ppp_net_ioctl,
+       .ndo_get_stats64 = ppp_get_stats64,
 };
 
 static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 #endif /* CONFIG_PPP_FILTER */
        }
 
-       ++ppp->dev->stats.tx_packets;
-       ppp->dev->stats.tx_bytes += skb->len - 2;
+       ++ppp->stats64.tx_packets;
+       ppp->stats64.tx_bytes += skb->len - 2;
 
        switch (proto) {
        case PPP_IP:
@@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                break;
        }
 
-       ++ppp->dev->stats.rx_packets;
-       ppp->dev->stats.rx_bytes += skb->len - 2;
+       ++ppp->stats64.rx_packets;
+       ppp->stats64.rx_bytes += skb->len - 2;
 
        npi = proto_to_npindex(proto);
        if (npi < 0) {
@@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
        struct slcompress *vj = ppp->vj;
 
        memset(st, 0, sizeof(*st));
-       st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
+       st->p.ppp_ipackets = ppp->stats64.rx_packets;
        st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
-       st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
-       st->p.ppp_opackets = ppp->dev->stats.tx_packets;
+       st->p.ppp_ibytes = ppp->stats64.rx_bytes;
+       st->p.ppp_opackets = ppp->stats64.tx_packets;
        st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
-       st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+       st->p.ppp_obytes = ppp->stats64.tx_bytes;
        if (!vj)
                return;
        st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
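
The ppp changes above follow the usual 64-bit stats pattern: keep the hot counters in driver-private storage under the driver's own locks and report them through ndo_get_stats64, copying the remaining error counters from dev->stats. A stripped-down sketch with a hypothetical demo_priv (not the ppp code itself):

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct demo_priv {
            spinlock_t lock;
            u64 rx_packets;
            u64 rx_bytes;
    };

    static struct rtnl_link_stats64 *
    demo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
    {
            struct demo_priv *p = netdev_priv(dev);

            spin_lock_bh(&p->lock);
            s->rx_packets = p->rx_packets;
            s->rx_bytes   = p->rx_bytes;
            spin_unlock_bh(&p->lock);

            s->rx_errors = dev->stats.rx_errors;    /* 32-bit counters stay in dev->stats */
            return s;
    }
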
index 6a7260b..6b08bd4 100644 (file)
@@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST
        ---help---
          Basic mode where packets are transmitted always by all suitable ports.
 
-         All added ports are setup to have team's mac address.
+         All added ports are setup to have team's device address.
 
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_broadcast.
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
          Basic mode where port used for transmitting packets is selected in
          round-robin fashion using packet counter.
 
-         All added ports are setup to have team's mac address.
+         All added ports are setup to have team's device address.
 
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_roundrobin.
index 341b65d..b4f67b5 100644 (file)
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 }
 
 /*
- * Since the ability to change mac address for open port device is tested in
+ * Since the ability to change device address for open port device is tested in
  * team_port_add, this function can be called without control of return value
  */
-static int __set_port_mac(struct net_device *port_dev,
-                         const unsigned char *dev_addr)
+static int __set_port_dev_addr(struct net_device *port_dev,
+                              const unsigned char *dev_addr)
 {
        struct sockaddr addr;
 
-       memcpy(addr.sa_data, dev_addr, ETH_ALEN);
-       addr.sa_family = ARPHRD_ETHER;
+       memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
+       addr.sa_family = port_dev->type;
        return dev_set_mac_address(port_dev, &addr);
 }
 
-static int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_dev_addr(struct team_port *port)
 {
-       return __set_port_mac(port->dev, port->orig.dev_addr);
+       return __set_port_dev_addr(port->dev, port->orig.dev_addr);
 }
 
-int team_port_set_team_mac(struct team_port *port)
+int team_port_set_team_dev_addr(struct team_port *port)
 {
-       return __set_port_mac(port->dev, port->team->dev->dev_addr);
+       return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
 }
-EXPORT_SYMBOL(team_port_set_team_mac);
+EXPORT_SYMBOL(team_port_set_team_dev_addr);
 
 static void team_refresh_port_linkup(struct team_port *port)
 {
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
 }
 
 
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+       struct list_head *listarr;
+       unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+       unsigned int i;
+
+       if (!queue_cnt)
+               return 0;
+       listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+       if (!listarr)
+               return -ENOMEM;
+       team->qom_lists = listarr;
+       for (i = 0; i < queue_cnt; i++)
+               INIT_LIST_HEAD(listarr++);
+       return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+       kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+       return &team->qom_lists[queue_id - 1];
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct list_head *qom_list;
+       struct team_port *port;
+
+       if (!team->queue_override_enabled || !skb->queue_mapping)
+               return false;
+       qom_list = __team_get_qom_list(team, skb->queue_mapping);
+       list_for_each_entry_rcu(port, qom_list, qom_list) {
+               if (!team_dev_queue_xmit(team, port, skb))
+                       return true;
+       }
+       return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+                                          struct team_port *port)
+{
+       list_del_rcu(&port->qom_list);
+       synchronize_rcu();
+       INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+                                                     struct team_port *cur)
+{
+       if (port->priority < cur->priority)
+               return true;
+       if (port->priority > cur->priority)
+               return false;
+       if (port->index < cur->index)
+               return true;
+       return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+                                          struct team_port *port)
+{
+       struct team_port *cur;
+       struct list_head *qom_list;
+       struct list_head *node;
+
+       if (!port->queue_id || !team_port_enabled(port))
+               return;
+
+       qom_list = __team_get_qom_list(team, port->queue_id);
+       node = qom_list;
+       list_for_each_entry(cur, qom_list, qom_list) {
+               if (team_queue_override_port_has_gt_prio_than(port, cur))
+                       break;
+               node = &cur->qom_list;
+       }
+       list_add_tail_rcu(&port->qom_list, node);
+}
+
+static void __team_queue_override_enabled_check(struct team *team)
+{
+       struct team_port *port;
+       bool enabled = false;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               if (!list_empty(&port->qom_list)) {
+                       enabled = true;
+                       break;
+               }
+       }
+       if (enabled == team->queue_override_enabled)
+               return;
+       netdev_dbg(team->dev, "%s queue override\n",
+                  enabled ? "Enabling" : "Disabling");
+       team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+                                            struct team_port *port)
+{
+       __team_queue_override_port_del(team, port);
+       __team_queue_override_port_add(team, port);
+       __team_queue_override_enabled_check(team);
+}
+
+
 /****************
  * Port handling
  ****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
+       team_queue_override_port_refresh(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
 }
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
+       team_queue_override_port_refresh(team, port);
        __team_adjust_ops(team, team->en_port_count - 1);
        /*
         * Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
 #endif
 
 static void __team_port_change_check(struct team_port *port, bool linkup);
+static int team_dev_type_check_change(struct net_device *dev,
+                                     struct net_device *port_dev);
 
 static int team_port_add(struct team *team, struct net_device *port_dev)
 {
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
        char *portname = port_dev->name;
        int err;
 
-       if (port_dev->flags & IFF_LOOPBACK ||
-           port_dev->type != ARPHRD_ETHER) {
-               netdev_err(dev, "Device %s is of an unsupported type\n",
+       if (port_dev->flags & IFF_LOOPBACK) {
+               netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                return -EBUSY;
        }
 
+       if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+           vlan_uses_dev(dev)) {
+               netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+                          portname);
+               return -EPERM;
+       }
+
+       err = team_dev_type_check_change(dev, port_dev);
+       if (err)
+               return err;
+
        if (port_dev->flags & IFF_UP) {
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 
        port->dev = port_dev;
        port->team = team;
+       INIT_LIST_HEAD(&port->qom_list);
 
        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_set_mtu;
        }
 
-       memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+       memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
 
        err = team_port_enter(team, port);
        if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
 
 err_dev_open:
        team_port_leave(team, port);
-       team_port_set_orig_mac(port);
+       team_port_set_orig_dev_addr(port);
 
 err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);
@@ -1009,7 +1140,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        vlan_vids_del_by_dev(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);
-       team_port_set_orig_mac(port);
+       team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        synchronize_rcu();
        kfree(port);
@@ -1094,6 +1225,49 @@ static int team_user_linkup_en_option_set(struct team *team,
        return 0;
 }
 
+static int team_priority_option_get(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       ctx->data.s32_val = port->priority;
+       return 0;
+}
+
+static int team_priority_option_set(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       port->priority = ctx->data.s32_val;
+       team_queue_override_port_refresh(team, port);
+       return 0;
+}
+
+static int team_queue_id_option_get(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       ctx->data.u32_val = port->queue_id;
+       return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       if (port->queue_id == ctx->data.u32_val)
+               return 0;
+       if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+               return -EINVAL;
+       port->queue_id = ctx->data.u32_val;
+       team_queue_override_port_refresh(team, port);
+       return 0;
+}
+
+
 static const struct team_option team_options[] = {
        {
                .name = "mode",
@@ -1122,6 +1296,20 @@ static const struct team_option team_options[] = {
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
+       {
+               .name = "priority",
+               .type = TEAM_OPTION_TYPE_S32,
+               .per_port = true,
+               .getter = team_priority_option_get,
+               .setter = team_priority_option_set,
+       },
+       {
+               .name = "queue_id",
+               .type = TEAM_OPTION_TYPE_U32,
+               .per_port = true,
+               .getter = team_queue_id_option_get,
+               .setter = team_queue_id_option_set,
+       },
 };
 
 static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1157,6 +1345,9 @@ static int team_init(struct net_device *dev)
        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
+       err = team_queue_override_init(team);
+       if (err)
+               goto err_team_queue_override_init;
 
        team_adjust_ops(team);
 
@@ -1172,6 +1363,8 @@ static int team_init(struct net_device *dev)
        return 0;
 
 err_options_register:
+       team_queue_override_fini(team);
+err_team_queue_override_init:
        free_percpu(team->pcpu_stats);
 
        return err;
@@ -1189,6 +1382,7 @@ static void team_uninit(struct net_device *dev)
 
        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+       team_queue_override_fini(team);
        mutex_unlock(&team->lock);
 }
 
@@ -1218,10 +1412,12 @@ static int team_close(struct net_device *dev)
 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct team *team = netdev_priv(dev);
-       bool tx_success = false;
+       bool tx_success;
        unsigned int len = skb->len;
 
-       tx_success = team->ops.transmit(team, skb);
+       tx_success = team_queue_override_transmit(team, skb);
+       if (!tx_success)
+               tx_success = team->ops.transmit(team, skb);
        if (tx_success) {
                struct team_pcpu_stats *pcpu_stats;
 
@@ -1295,17 +1491,18 @@ static void team_set_rx_mode(struct net_device *dev)
 
 static int team_set_mac_address(struct net_device *dev, void *p)
 {
+       struct sockaddr *addr = p;
        struct team *team = netdev_priv(dev);
        struct team_port *port;
-       int err;
 
-       err = eth_mac_addr(dev, p);
-       if (err)
-               return err;
+       if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       dev->addr_assign_type &= ~NET_ADDR_RANDOM;
        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list)
-               if (team->ops.port_change_mac)
-                       team->ops.port_change_mac(team, port);
+               if (team->ops.port_change_dev_addr)
+                       team->ops.port_change_dev_addr(team, port);
        rcu_read_unlock();
        return 0;
 }
@@ -1536,6 +1733,45 @@ static const struct net_device_ops team_netdev_ops = {
  * rt netlink interface
  ***********************/
 
+static void team_setup_by_port(struct net_device *dev,
+                              struct net_device *port_dev)
+{
+       dev->header_ops = port_dev->header_ops;
+       dev->type = port_dev->type;
+       dev->hard_header_len = port_dev->hard_header_len;
+       dev->addr_len = port_dev->addr_len;
+       dev->mtu = port_dev->mtu;
+       memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+       memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+       dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+}
+
+static int team_dev_type_check_change(struct net_device *dev,
+                                     struct net_device *port_dev)
+{
+       struct team *team = netdev_priv(dev);
+       char *portname = port_dev->name;
+       int err;
+
+       if (dev->type == port_dev->type)
+               return 0;
+       if (!list_empty(&team->port_list)) {
+               netdev_err(dev, "Device %s is of different type\n", portname);
+               return -EBUSY;
+       }
+       err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
+       err = notifier_to_errno(err);
+       if (err) {
+               netdev_err(dev, "Refused to change device type\n");
+               return err;
+       }
+       dev_uc_flush(dev);
+       dev_mc_flush(dev);
+       team_setup_by_port(dev, port_dev);
+       call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+       return 0;
+}
+
 static void team_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -1789,6 +2025,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
                        goto nest_cancel;
                break;
+       case TEAM_OPTION_TYPE_S32:
+               if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
+                       goto nest_cancel;
+               if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
+                       goto nest_cancel;
+               break;
        default:
                BUG();
        }
@@ -1977,6 +2219,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                case NLA_FLAG:
                        opt_type = TEAM_OPTION_TYPE_BOOL;
                        break;
+               case NLA_S32:
+                       opt_type = TEAM_OPTION_TYPE_S32;
+                       break;
                default:
                        goto team_put;
                }
@@ -2033,6 +2278,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                        case TEAM_OPTION_TYPE_BOOL:
                                ctx.data.bool_val = attr_data ? true : false;
                                break;
+                       case TEAM_OPTION_TYPE_S32:
+                               ctx.data.s32_val = nla_get_s32(attr_data);
+                               break;
                        default:
                                BUG();
                        }
@@ -2245,7 +2493,7 @@ static void __team_options_change_check(struct team *team)
                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
        }
        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
-       if (err)
+       if (err && err != -ESRCH)
                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
                            err);
 }
@@ -2276,9 +2524,9 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
 
 send_event:
        err = team_nl_send_event_port_list_get(port->team);
-       if (err)
-               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
-                           port->dev->name);
+       if (err && err != -ESRCH)
+               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
+                           port->dev->name, err);
 
 }
 
index c96e4d2..9db0171 100644 (file)
@@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
 
 static int bc_port_enter(struct team *team, struct team_port *port)
 {
-       return team_port_set_team_mac(port);
+       return team_port_set_team_dev_addr(port);
 }
 
-static void bc_port_change_mac(struct team *team, struct team_port *port)
+static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-       team_port_set_team_mac(port);
+       team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops bc_mode_ops = {
        .transmit               = bc_transmit,
        .port_enter             = bc_port_enter,
-       .port_change_mac        = bc_port_change_mac,
+       .port_change_dev_addr   = bc_port_change_dev_addr,
 };
 
 static const struct team_mode bc_mode = {
index ad7ed0e..105135a 100644 (file)
@@ -66,18 +66,18 @@ drop:
 
 static int rr_port_enter(struct team *team, struct team_port *port)
 {
-       return team_port_set_team_mac(port);
+       return team_port_set_team_dev_addr(port);
 }
 
-static void rr_port_change_mac(struct team *team, struct team_port *port)
+static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-       team_port_set_team_mac(port);
+       team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops rr_mode_ops = {
        .transmit               = rr_transmit,
        .port_enter             = rr_port_enter,
-       .port_change_mac        = rr_port_change_mac,
+       .port_change_dev_addr   = rr_port_change_dev_addr,
 };
 
 static const struct team_mode rr_mode = {
index 3a16d4f..498dc0d 100644 (file)
@@ -120,8 +120,8 @@ struct tun_sock;
 struct tun_struct {
        struct tun_file         *tfile;
        unsigned int            flags;
-       uid_t                   owner;
-       gid_t                   group;
+       kuid_t                  owner;
+       kgid_t                  group;
 
        struct net_device       *dev;
        netdev_features_t       set_features;
@@ -1031,8 +1031,8 @@ static void tun_setup(struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
-       tun->owner = -1;
-       tun->group = -1;
+       tun->owner = INVALID_UID;
+       tun->group = INVALID_GID;
 
        dev->ethtool_ops = &tun_ethtool_ops;
        dev->destructor = tun_free_netdev;
@@ -1155,14 +1155,20 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
-       return sprintf(buf, "%d\n", tun->owner);
+       return uid_valid(tun->owner)?
+               sprintf(buf, "%u\n",
+                       from_kuid_munged(current_user_ns(), tun->owner)):
+               sprintf(buf, "-1\n");
 }
 
 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
-       return sprintf(buf, "%d\n", tun->group);
+       return gid_valid(tun->group) ?
+               sprintf(buf, "%u\n",
+                       from_kgid_munged(current_user_ns(), tun->group)):
+               sprintf(buf, "-1\n");
 }
 
 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
@@ -1189,8 +1195,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                else
                        return -EINVAL;
 
-               if (((tun->owner != -1 && cred->euid != tun->owner) ||
-                    (tun->group != -1 && !in_egroup_p(tun->group))) &&
+               if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+                    (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
                    !capable(CAP_NET_ADMIN))
                        return -EPERM;
                err = security_tun_dev_attach(tun->socket.sk);
@@ -1374,6 +1380,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        void __user* argp = (void __user*)arg;
        struct sock_fprog fprog;
        struct ifreq ifr;
+       kuid_t owner;
+       kgid_t group;
        int sndbuf;
        int vnet_hdr_sz;
        int ret;
@@ -1447,16 +1455,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 
        case TUNSETOWNER:
                /* Set owner of the device */
-               tun->owner = (uid_t) arg;
-
-               tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
+               owner = make_kuid(current_user_ns(), arg);
+               if (!uid_valid(owner)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               tun->owner = owner;
+               tun_debug(KERN_INFO, tun, "owner set to %d\n",
+                         from_kuid(&init_user_ns, tun->owner));
                break;
 
        case TUNSETGROUP:
                /* Set group of the device */
-               tun->group= (gid_t) arg;
-
-               tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
+               group = make_kgid(current_user_ns(), arg);
+               if (!gid_valid(group)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               tun->group = group;
+               tun_debug(KERN_INFO, tun, "group set to %d\n",
+                         from_kgid(&init_user_ns, tun->group));
                break;
 
        case TUNSETLINK:
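
The TUNSETOWNER/TUNSETGROUP conversion above is the standard user-namespace pattern: convert the raw uid_t at the ioctl boundary, validate the mapping, store only kuid_t, and convert back solely for display. A compressed sketch with hypothetical demo_* helpers:

    #include <linux/uidgid.h>
    #include <linux/cred.h>
    #include <linux/errno.h>

    static int demo_set_owner(kuid_t *stored, uid_t arg)
    {
            kuid_t owner = make_kuid(current_user_ns(), arg);

            if (!uid_valid(owner))
                    return -EINVAL; /* no mapping for this uid in the caller's namespace */
            *stored = owner;
            return 0;
    }

    static uid_t demo_show_owner(kuid_t stored)
    {
            /* map back relative to the reader's namespace when printing */
            return from_kuid_munged(current_user_ns(), stored);
    }
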
index 5852361..e522ff7 100644 (file)
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        if (tbp[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(peer);
 
+       if (ifmp && (dev->ifindex != 0))
+               peer->ifindex = ifmp->ifi_index;
+
        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
index 83d2b0c..81a64c5 100644 (file)
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
                goto done;
 
        if (v & VIRTIO_NET_S_ANNOUNCE) {
-               netif_notify_peers(vi->dev);
+               netdev_notify_peers(vi->dev);
                virtnet_ack_link_announce(vi);
        }
 
index 0254261..9c34d2f 100644 (file)
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
        struct sk_buff *skb;
        const struct i2400m_tlv_detailed_device_info *ddi;
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-       const unsigned char zeromac[ETH_ALEN] = { 0 };
 
        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
                 "to that of boot mode's\n");
        dev_warn(dev, "device reports     %pM\n", ddi->mac_address);
        dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
-       if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+       if (is_zero_ether_addr(ddi->mac_address))
                dev_err(dev, "device reports an invalid MAC address, "
                        "not updating\n");
        else {
index 689a71c..154a496 100644 (file)
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct adm8211_tx_hdr *txhdr;
        size_t payload_len, hdrlen;
index f9f15bb..c586f78 100644 (file)
@@ -232,8 +232,10 @@ static int adhoc;
 
 static int probe = 1;
 
+static kuid_t proc_kuid;
 static int proc_uid /* = 0 */;
 
+static kgid_t proc_kgid;
 static int proc_gid /* = 0 */;
 
 static int airo_perm = 0555;
@@ -4499,78 +4501,79 @@ struct proc_data {
 static int setup_proc_entry( struct net_device *dev,
                             struct airo_info *apriv ) {
        struct proc_dir_entry *entry;
+
        /* First setup the device directory */
        strcpy(apriv->proc_name,dev->name);
        apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
                                            airo_entry);
        if (!apriv->proc_entry)
                goto fail;
-       apriv->proc_entry->uid = proc_uid;
-       apriv->proc_entry->gid = proc_gid;
+       apriv->proc_entry->uid = proc_kuid;
+       apriv->proc_entry->gid = proc_kgid;
 
        /* Setup the StatsDelta */
        entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
                                 apriv->proc_entry, &proc_statsdelta_ops, dev);
        if (!entry)
                goto fail_stats_delta;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the Stats */
        entry = proc_create_data("Stats", S_IRUGO & proc_perm,
                                 apriv->proc_entry, &proc_stats_ops, dev);
        if (!entry)
                goto fail_stats;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the Status */
        entry = proc_create_data("Status", S_IRUGO & proc_perm,
                                 apriv->proc_entry, &proc_status_ops, dev);
        if (!entry)
                goto fail_status;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the Config */
        entry = proc_create_data("Config", proc_perm,
                                 apriv->proc_entry, &proc_config_ops, dev);
        if (!entry)
                goto fail_config;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the SSID */
        entry = proc_create_data("SSID", proc_perm,
                                 apriv->proc_entry, &proc_SSID_ops, dev);
        if (!entry)
                goto fail_ssid;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the APList */
        entry = proc_create_data("APList", proc_perm,
                                 apriv->proc_entry, &proc_APList_ops, dev);
        if (!entry)
                goto fail_aplist;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the BSSList */
        entry = proc_create_data("BSSList", proc_perm,
                                 apriv->proc_entry, &proc_BSSList_ops, dev);
        if (!entry)
                goto fail_bsslist;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the WepKey */
        entry = proc_create_data("WepKey", proc_perm,
                                 apriv->proc_entry, &proc_wepkey_ops, dev);
        if (!entry)
                goto fail_wepkey;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        return 0;
 
@@ -5697,11 +5700,16 @@ static int __init airo_init_module( void )
 {
        int i;
 
+       proc_kuid = make_kuid(&init_user_ns, proc_uid);
+       proc_kgid = make_kgid(&init_user_ns, proc_gid);
+       if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
+               return -EINVAL;
+
        airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
 
        if (airo_entry) {
-               airo_entry->uid = proc_uid;
-               airo_entry->gid = proc_gid;
+               airo_entry->uid = proc_kuid;
+               airo_entry->gid = proc_kgid;
        }
 
        for (i = 0; i < 4 && io[i] && irq[i]; i++) {
index 88b8d64..e361afe 100644 (file)
@@ -1726,7 +1726,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
        ieee80211_wake_queues(priv->hw);
 }
 
-static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw,
+                            struct ieee80211_tx_control *control,
+                            struct sk_buff *skb)
 {
        struct at76_priv *priv = hw->priv;
        struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
index 64a453a..3150def 100644 (file)
@@ -1331,7 +1331,6 @@ struct ath5k_hw {
        unsigned int            nexttbtt;       /* next beacon time in TU */
        struct ath5k_txq        *cabq;          /* content after beacon */
 
-       int                     power_level;    /* Requested tx power in dBm */
        bool                    assoc;          /* associate state */
        bool                    enable_beacon;  /* true if beacons are on */
 
@@ -1425,6 +1424,7 @@ struct ath5k_hw {
                /* Value in dB units */
                s16             txp_cck_ofdm_pwr_delta;
                bool            txp_setup;
+               int             txp_requested;  /* Requested tx power in dBm */
        } ah_txpower;
 
        struct ath5k_nfcal_hist ah_nfcal_hist;
index 2aab20e..a0a202d 100644 (file)
@@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
                ieee80211_get_hdrlen_from_skb(skb), padsize,
                get_hw_packet_type(skb),
-               (ah->power_level * 2),
+               (ah->ah_txpower.txp_requested * 2),
                hw_rate,
                info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
                cts_rate, duration);
@@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
        ds->ds_data = bf->skbaddr;
        ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
                        ieee80211_get_hdrlen_from_skb(skb), padsize,
-                       AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
+                       AR5K_PKT_TYPE_BEACON,
+                       (ah->ah_txpower.txp_requested * 2),
                        ieee80211_get_tx_rate(ah->hw, info)->hw_value,
                        1, AR5K_TXKEYIX_INVALID,
                        antenna, flags, 0, 0);
index d56453e..df61a09 100644 (file)
@@ -55,7 +55,8 @@
 \********************/
 
 static void
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+        struct sk_buff *skb)
 {
        struct ath5k_hw *ah = hw->priv;
        u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
        }
 
        if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
-       (ah->power_level != conf->power_level)) {
-               ah->power_level = conf->power_level;
+       (ah->ah_txpower.txp_requested != conf->power_level)) {
+               ah->ah_txpower.txp_requested = conf->power_level;
 
                /* Half dB steps */
                ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
index 8b71a2d..01c90ed 100644 (file)
@@ -3516,6 +3516,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 {
        unsigned int i;
        u16 *rates;
+       s16 rate_idx_scaled = 0;
 
        /* max_pwr is power level we got from driver/user in 0.5dB
         * units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3563,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
                for (i = 8; i <= 15; i++)
                        rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
 
+       /* Save min/max and current tx power for this channel
+        * in 0.25dB units.
+        *
+        * Note: We use rates[0] for current tx power because
+        * it covers most of the rates, in most cases. It's our
+        * tx power limit and what the user expects to see. */
+       ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+       ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
+
+       /* Set max txpower for correct OFDM operation on all rates
+        * -that is the txpower for 54Mbit-, it's used for the PAPD
+        * gain probe and it's in 0.5dB units */
+       ah->ah_txpower.txp_ofdm = rates[7];
+
        /* Now that we have all rates setup use table offset to
         * match the power range set by user with the power indices
         * on PCDAC/PDADC table */
        for (i = 0; i < 16; i++) {
-               rates[i] += ah->ah_txpower.txp_offset;
+               rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
                /* Don't get out of bounds */
-               if (rates[i] > 63)
-                       rates[i] = 63;
+               if (rate_idx_scaled > 63)
+                       rate_idx_scaled = 63;
+               if (rate_idx_scaled < 0)
+                       rate_idx_scaled = 0;
+               rates[i] = rate_idx_scaled;
        }
-
-       /* Min/max in 0.25dB units */
-       ah->ah_txpower.txp_min_pwr = 2 * rates[7];
-       ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
-       ah->ah_txpower.txp_ofdm = rates[7];
 }
 
 
@@ -3639,10 +3652,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        if (!ah->ah_txpower.txp_setup ||
            (channel->hw_value != curr_channel->hw_value) ||
            (channel->center_freq != curr_channel->center_freq)) {
-               /* Reset TX power values */
+               /* Reset TX power values but preserve requested
+                * tx power from above */
+               int requested_txpower = ah->ah_txpower.txp_requested;
+
                memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+
+               /* Restore TPC setting and requested tx power */
                ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
 
+               ah->ah_txpower.txp_requested = requested_txpower;
+
                /* Calculate the powertable */
                ret = ath5k_setup_channel_powertable(ah, channel,
                                                        ee_mode, type);
@@ -3789,8 +3809,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
         * RF buffer settings on 5211/5212+ so that we
         * properly set curve indices.
         */
-       ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
-                       ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
+       ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
+                                       ah->ah_txpower.txp_requested * 2 :
+                                       AR5K_TUNE_MAX_TXPOWER);
        if (ret)
                return ret;
 
index 2588848..c37fe96 100644 (file)
@@ -4901,90 +4901,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
                                i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
                                chan->channel);
 
-                               /*
-                                * compare test group from regulatory
-                                * channel list with test mode from pCtlMode
-                                * list
-                                */
-                               if ((((cfgCtl & ~CTL_MODE_M) |
-                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-                                       ctlIndex[i]) ||
-                                   (((cfgCtl & ~CTL_MODE_M) |
-                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-                                    ((ctlIndex[i] & CTL_MODE_M) |
-                                      SD_NO_CTL))) {
-                                       twiceMinEdgePower =
-                                         ar9003_hw_get_max_edge_power(pEepData,
-                                                                      freq, i,
-                                                                      is2ghz);
-
-                                       if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
-                                               /*
-                                                * Find the minimum of all CTL
-                                                * edge powers that apply to
-                                                * this channel
-                                                */
-                                               twiceMaxEdgePower =
-                                                       min(twiceMaxEdgePower,
-                                                           twiceMinEdgePower);
-                                               else {
-                                                       /* specific */
-                                                       twiceMaxEdgePower =
-                                                         twiceMinEdgePower;
-                                                       break;
-                                               }
+                       /*
+                        * compare test group from regulatory
+                        * channel list with test mode from pCtlMode
+                        * list
+                        */
+                       if ((((cfgCtl & ~CTL_MODE_M) |
+                              (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                               ctlIndex[i]) ||
+                           (((cfgCtl & ~CTL_MODE_M) |
+                              (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                            ((ctlIndex[i] & CTL_MODE_M) |
+                              SD_NO_CTL))) {
+                               twiceMinEdgePower =
+                                 ar9003_hw_get_max_edge_power(pEepData,
+                                                              freq, i,
+                                                              is2ghz);
+
+                               if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+                                       /*
+                                        * Find the minimum of all CTL
+                                        * edge powers that apply to
+                                        * this channel
+                                        */
+                                       twiceMaxEdgePower =
+                                               min(twiceMaxEdgePower,
+                                                   twiceMinEdgePower);
+                               else {
+                                       /* specific */
+                                       twiceMaxEdgePower = twiceMinEdgePower;
+                                       break;
                                }
                        }
+               }
 
-                       minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+               minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
 
-                       ath_dbg(common, REGULATORY,
-                               "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
-                               ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
-                               scaledPower, minCtlPower);
-
-                       /* Apply ctl mode to correct target power set */
-                       switch (pCtlMode[ctlMode]) {
-                       case CTL_11B:
-                               for (i = ALL_TARGET_LEGACY_1L_5L;
-                                    i <= ALL_TARGET_LEGACY_11S; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       case CTL_11A:
-                       case CTL_11G:
-                               for (i = ALL_TARGET_LEGACY_6_24;
-                                    i <= ALL_TARGET_LEGACY_54; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       case CTL_5GHT20:
-                       case CTL_2GHT20:
-                               for (i = ALL_TARGET_HT20_0_8_16;
-                                    i <= ALL_TARGET_HT20_21; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               pPwrArray[ALL_TARGET_HT20_22] =
-                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
-                                         minCtlPower);
-                               pPwrArray[ALL_TARGET_HT20_23] =
-                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
-                                          minCtlPower);
-                               break;
-                       case CTL_5GHT40:
-                       case CTL_2GHT40:
-                               for (i = ALL_TARGET_HT40_0_8_16;
-                                    i <= ALL_TARGET_HT40_23; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       default:
-                           break;
-                       }
+               ath_dbg(common, REGULATORY,
+                       "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+                       ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+                       scaledPower, minCtlPower);
+
+               /* Apply ctl mode to correct target power set */
+               switch (pCtlMode[ctlMode]) {
+               case CTL_11B:
+                       for (i = ALL_TARGET_LEGACY_1L_5L;
+                            i <= ALL_TARGET_LEGACY_11S; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_11A:
+               case CTL_11G:
+                       for (i = ALL_TARGET_LEGACY_6_24;
+                            i <= ALL_TARGET_LEGACY_54; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_5GHT20:
+               case CTL_2GHT20:
+                       for (i = ALL_TARGET_HT20_0_8_16;
+                            i <= ALL_TARGET_HT20_23; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_5GHT40:
+               case CTL_2GHT40:
+                       for (i = ALL_TARGET_HT40_0_8_16;
+                            i <= ALL_TARGET_HT40_23; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               default:
+                       break;
+               }
        } /* end ctl mode checking */
 }
 
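Functionally the re-indented block still does the same thing: for each CTL mode it takes the minimum of the regulatory edge power and the user's scaled power, then caps every target power in the affected rate group. A standalone sketch of that capping step (array layout and names are illustrative):

#include <stdint.h>

static void cap_target_powers(uint8_t *pwr, int first, int last,
                              uint16_t twice_max_edge, uint16_t scaled_power)
{
        uint16_t min_ctl = twice_max_edge < scaled_power ?
                           twice_max_edge : scaled_power;
        int i;

        /* Clamp every per-rate target in [first, last] to the CTL limit. */
        for (i = first; i <= last; i++)
                if (pwr[i] > min_ctl)
                        pwr[i] = (uint8_t)min_ctl;
}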
index b09285c..7373e4b 100644
@@ -280,6 +280,7 @@ struct ath_tx_control {
        struct ath_txq *txq;
        struct ath_node *an;
        u8 paprd;
+       struct ieee80211_sta *sta;
 };
 
 #define ATH_TX_ERROR        0x01
index 936e920..b30596f 100644
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
 
 int ath9k_tx_init(struct ath9k_htc_priv *priv);
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+                      struct ieee80211_sta *sta,
                       struct sk_buff *skb, u8 slot, bool is_cab);
 void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
 bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
index 77d541f..f42d2eb 100644
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
                        goto next;
                }
 
-               ret = ath9k_htc_tx_start(priv, skb, tx_slot, true);
+               ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
                if (ret != 0) {
                        ath9k_htc_tx_clear_slot(priv, tx_slot);
                        dev_kfree_skb_any(skb);
index c785129..c32f6e3 100644
@@ -856,7 +856,9 @@ set_timer:
 /* mac80211 Callbacks */
 /**********************/
 
-static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +885,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto fail_tx;
        }
 
-       ret = ath9k_htc_tx_start(priv, skb, slot, false);
+       ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
        if (ret != 0) {
                ath_dbg(common, XMIT, "Tx failed\n");
                goto clear_slot;
@@ -1331,6 +1333,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
        return ret;
 }
 
+static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta, u32 changed)
+{
+       struct ath9k_htc_priv *priv = hw->priv;
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_htc_target_rate trate;
+
+       mutex_lock(&priv->mutex);
+       ath9k_htc_ps_wakeup(priv);
+
+       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+               memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+               ath9k_htc_setup_rate(priv, sta, &trate);
+               if (!ath9k_htc_send_rate_cmd(priv, &trate))
+                       ath_dbg(common, CONFIG,
+                               "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+                               sta->addr, be32_to_cpu(trate.capflags));
+               else
+                       ath_dbg(common, CONFIG,
+                               "Unable to update supported rates for sta: %pM\n",
+                               sta->addr);
+       }
+
+       ath9k_htc_ps_restore(priv);
+       mutex_unlock(&priv->mutex);
+}
+
 static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif, u16 queue,
                             const struct ieee80211_tx_queue_params *params)
@@ -1758,6 +1788,7 @@ struct ieee80211_ops ath9k_htc_ops = {
        .sta_add            = ath9k_htc_sta_add,
        .sta_remove         = ath9k_htc_sta_remove,
        .conf_tx            = ath9k_htc_conf_tx,
+       .sta_rc_update      = ath9k_htc_sta_rc_update,
        .bss_info_changed   = ath9k_htc_bss_info_changed,
        .set_key            = ath9k_htc_set_key,
        .get_tsf            = ath9k_htc_get_tsf,
index 47e61d0..06cdcb7 100644
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
 }
 
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+                      struct ieee80211_sta *sta,
                       struct sk_buff *skb,
                       u8 slot, bool is_cab)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_vif *vif = tx_info->control.vif;
        struct ath9k_htc_sta *ista;
        struct ath9k_htc_vif *avp = NULL;
index a22df74..8a2b04d 100644
@@ -696,7 +696,9 @@ mutex_unlock:
        return r;
 }
 
-static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        memset(&txctl, 0, sizeof(struct ath_tx_control));
        txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
+       txctl.sta = control->sta;
 
        ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
 
index e034add..4b12c34 100644
@@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
        8, /* MCS start */
        {
                [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */
+                       5400, 0, 12 }, /* 6 Mb */
                [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
-                       7800,  1, 18, 0, 1, 1, 1 }, /* 9 Mb */
+                       7800,  1, 18 }, /* 9 Mb */
                [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */
+                       10000, 2, 24 }, /* 12 Mb */
                [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */
+                       13900, 3, 36 }, /* 18 Mb */
                [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */
+                       17300, 4, 48 }, /* 24 Mb */
                [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */
+                       23000, 5, 72 }, /* 36 Mb */
                [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */
+                       27400, 6, 96 }, /* 48 Mb */
                [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */
+                       29300, 7, 108 }, /* 54 Mb */
                [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */
+                       6400, 0, 0 }, /* 6.5 Mb */
                [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */
+                       12700, 1, 1 }, /* 13 Mb */
                [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */
+                       18800, 2, 2 }, /* 19.5 Mb */
                [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */
+                       25000, 3, 3 }, /* 26 Mb */
                [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */
+                       36700, 4, 4 }, /* 39 Mb */
                [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */
+                       48100, 5, 5 }, /* 52 Mb */
                [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */
+                       53500, 6, 6 }, /* 58.5 Mb */
                [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */
+                       59000, 7, 7 }, /* 65 Mb */
                [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */
+                       65400, 7, 7 }, /* 75 Mb */
                [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */
+                       12700, 8, 8 }, /* 13 Mb */
                [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */
+                       24800, 9, 9 }, /* 26 Mb */
                [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */
+                       36600, 10, 10 }, /* 39 Mb */
                [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */
+                       48100, 11, 11 }, /* 52 Mb */
                [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */
+                       69500, 12, 12 }, /* 78 Mb */
                [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */
+                       89500, 13, 13 }, /* 104 Mb */
                [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */
+                       98900, 14, 14 }, /* 117 Mb */
                [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */
+                       108300, 15, 15 }, /* 130 Mb */
                [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */
+                       120000, 15, 15 }, /* 144.4 Mb */
                [26] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */
+                       17400, 16, 16 }, /* 19.5 Mb */
                [27] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */
+                       35100, 17, 17 }, /* 39 Mb */
                [28] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */
+                       52600, 18, 18 }, /* 58.5 Mb */
                [29] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */
+                       70400, 19, 19 }, /* 78 Mb */
                [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */
+                       104900, 20, 20 }, /* 117 Mb */
                [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/
+                       115800, 20, 20 }, /* 130 Mb*/
                [32] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */
+                       137200, 21, 21 }, /* 156 Mb */
                [33] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */
+                       151100, 21, 21 }, /* 173.3 Mb */
                [34] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */
+                       152800, 22, 22 }, /* 175.5 Mb */
                [35] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/
+                       168400, 22, 22 }, /* 195 Mb*/
                [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */
+                       168400, 23, 23 }, /* 195 Mb */
                [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */
+                       185000, 23, 23 }, /* 216.7 Mb */
                [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/
+                       13200, 0, 0 }, /* 13.5 Mb*/
                [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/
+                       25900, 1, 1 }, /* 27.0 Mb*/
                [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/
+                       38600, 2, 2 }, /* 40.5 Mb*/
                [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */
+                       49800, 3, 3 }, /* 54 Mb */
                [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */
+                       72200, 4, 4 }, /* 81 Mb */
                [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */
+                       92900, 5, 5 }, /* 108 Mb */
                [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/
+                       102700, 6, 6 }, /* 121.5 Mb*/
                [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */
+                       112000, 7, 7 }, /* 135 Mb */
                [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */
+                       122000, 7, 7 }, /* 150 Mb */
                [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */
+                       25800, 8, 8 }, /* 27 Mb */
                [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */
+                       49800, 9, 9 }, /* 54 Mb */
                [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */
+                       71900, 10, 10 }, /* 81 Mb */
                [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */
+                       92500, 11, 11 }, /* 108 Mb */
                [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */
+                       130300, 12, 12 }, /* 162 Mb */
                [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */
+                       162800, 13, 13 }, /* 216 Mb */
                [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */
+                       178200, 14, 14 }, /* 243 Mb */
                [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */
+                       192100, 15, 15 }, /* 270 Mb */
                [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */
+                       207000, 15, 15 }, /* 300 Mb */
                [56] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */
+                       36100, 16, 16 }, /* 40.5 Mb */
                [57] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */
+                       72900, 17, 17 }, /* 81 Mb */
                [58] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */
+                       108300, 18, 18 }, /* 121.5 Mb */
                [59] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19, 4, 59, 59, 59 }, /*  162 Mb */
+                       142000, 19, 19 }, /*  162 Mb */
                [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20, 4, 60, 61, 61 }, /*  243 Mb */
+                       205100, 20, 20 }, /*  243 Mb */
                [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20, 4, 60, 61, 61 }, /*  270 Mb */
+                       224700, 20, 20 }, /*  270 Mb */
                [62] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21, 4, 62, 63, 63 }, /*  324 Mb */
+                       263100, 21, 21 }, /*  324 Mb */
                [63] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21, 4, 62, 63, 63 }, /*  360 Mb */
+                       288000, 21, 21 }, /*  360 Mb */
                [64] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */
+                       290700, 22, 22 }, /* 364.5 Mb */
                [65] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */
+                       317200, 22, 22 }, /* 405 Mb */
                [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */
+                       317200, 23, 23 }, /* 405 Mb */
                [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */
+                       346400, 23, 23 }, /* 450 Mb */
        },
        50,  /* probe interval */
        WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
        12, /* MCS start */
        {
                [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
-                       900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */
+                       900, 0, 2 }, /* 1 Mb */
                [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
-                       1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */
+                       1900, 1, 4 }, /* 2 Mb */
                [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
-                       4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */
+                       4900, 2, 11 }, /* 5.5 Mb */
                [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
-                       8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */
+                       8100, 3, 22 }, /* 11 Mb */
                [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */
+                       5400, 4, 12 }, /* 6 Mb */
                [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
-                       7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */
+                       7800, 5, 18 }, /* 9 Mb */
                [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */
+                       10100, 6, 24 }, /* 12 Mb */
                [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */
+                       14100, 7, 36 }, /* 18 Mb */
                [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */
+                       17700, 8, 48 }, /* 24 Mb */
                [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */
+                       23700, 9, 72 }, /* 36 Mb */
                [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */
+                       27400, 10, 96 }, /* 48 Mb */
                [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */
+                       30900, 11, 108 }, /* 54 Mb */
                [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */
+                       6400, 0, 0 }, /* 6.5 Mb */
                [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */
+                       12700, 1, 1 }, /* 13 Mb */
                [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/
+                       18800, 2, 2 }, /* 19.5 Mb*/
                [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */
+                       25000, 3, 3 }, /* 26 Mb */
                [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */
+                       36700, 4, 4 }, /* 39 Mb */
                [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */
+                       48100, 5, 5 }, /* 52 Mb */
                [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */
+                       53500, 6, 6 }, /* 58.5 Mb */
                [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */
+                       59000, 7, 7 }, /* 65 Mb */
                [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/
+                       65400, 7, 7 }, /* 65 Mb*/
                [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */
+                       12700, 8, 8 }, /* 13 Mb */
                [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */
+                       24800, 9, 9 }, /* 26 Mb */
                [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */
+                       36600, 10, 10 }, /* 39 Mb */
                [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */
+                       48100, 11, 11 }, /* 52 Mb */
                [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */
+                       69500, 12, 12 }, /* 78 Mb */
                [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */
+                       89500, 13, 13 }, /* 104 Mb */
                [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */
+                       98900, 14, 14 }, /* 117 Mb */
                [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */
+                       108300, 15, 15 }, /* 130 Mb */
                [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */
+                       120000, 15, 15 }, /* 144.4 Mb */
                [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */
+                       17400, 16, 16 }, /* 19.5 Mb */
                [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */
+                       35100, 17, 17 }, /* 39 Mb */
                [32] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */
+                       52600, 18, 18 }, /* 58.5 Mb */
                [33] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */
+                       70400, 19, 19 }, /* 78 Mb */
                [34] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */
+                       104900, 20, 20 }, /* 117 Mb */
                [35] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */
+                       115800, 20, 20 }, /* 130 Mb */
                [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */
+                       137200, 21, 21 }, /* 156 Mb */
                [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */
+                       151100, 21, 21 }, /* 173.3 Mb */
                [38] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */
+                       152800, 22, 22 }, /* 175.5 Mb */
                [39] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */
+                       168400, 22, 22 }, /* 195 Mb */
                [40] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */
+                       168400, 23, 23 }, /* 195 Mb */
                [41] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */
+                       185000, 23, 23 }, /* 216.7 Mb */
                [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */
+                       13200, 0, 0 }, /* 13.5 Mb */
                [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */
+                       25900, 1, 1 }, /* 27.0 Mb */
                [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */
+                       38600, 2, 2 }, /* 40.5 Mb */
                [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */
+                       49800, 3, 3 }, /* 54 Mb */
                [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */
+                       72200, 4, 4 }, /* 81 Mb */
                [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */
+                       92900, 5, 5 }, /* 108 Mb */
                [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */
+                       102700, 6, 6 }, /* 121.5 Mb */
                [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */
+                       112000, 7, 7 }, /* 135 Mb */
                [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */
+                       122000, 7, 7 }, /* 150 Mb */
                [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */
+                       25800, 8, 8 }, /* 27 Mb */
                [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */
+                       49800, 9, 9 }, /* 54 Mb */
                [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */
+                       71900, 10, 10 }, /* 81 Mb */
                [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */
+                       92500, 11, 11 }, /* 108 Mb */
                [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */
+                       130300, 12, 12 }, /* 162 Mb */
                [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */
+                       162800, 13, 13 }, /* 216 Mb */
                [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */
+                       178200, 14, 14 }, /* 243 Mb */
                [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */
+                       192100, 15, 15 }, /* 270 Mb */
                [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */
+                       207000, 15, 15 }, /* 300 Mb */
                [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */
+                       36100, 16, 16 }, /* 40.5 Mb */
                [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */
+                       72900, 17, 17 }, /* 81 Mb */
                [62] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */
+                       108300, 18, 18 }, /* 121.5 Mb */
                [63] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */
+                       142000, 19, 19 }, /* 162 Mb */
                [64] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
+                       205100, 20, 20 }, /* 243 Mb */
                [65] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */
+                       224700, 20, 20 }, /* 270 Mb */
                [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
+                       263100, 21, 21 }, /* 324 Mb */
                [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */
+                       288000, 21, 21 }, /* 360 Mb */
                [68] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */
+                       290700, 22, 22 }, /* 364.5 Mb */
                [69] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */
+                       317200, 22, 22 }, /* 405 Mb */
                [70] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */
+                       317200, 23, 23 }, /* 405 Mb */
                [71] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */
+                       346400, 23, 23 }, /* 450 Mb */
        },
        50,  /* probe interval */
        WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
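After the table shrink, each rate entry keeps only the columns that are still read: flags, phy, the two rate values, ratecode and dot11rate; the trailing ctrl_rate/cw40index/sgi_index/ht_index columns that the removed helpers consumed are gone. A guessed shape of the slimmed entry (field names taken from the accessors visible in the surrounding hunks, so they may not match the real struct exactly):

#include <stdint.h>

struct rate_entry_sketch {
        uint32_t rate_flags;    /* RC_* validity flags */
        uint8_t  phy;           /* WLAN_RC_PHY_* */
        uint32_t ratekbps;      /* first rate column above */
        uint32_t user_ratekbps; /* second rate column above (name assumed) */
        uint8_t  ratecode;      /* hardware rate code / MCS */
        uint8_t  dot11rate;
        /* ctrl_rate, cw40index, sgi_index, ht_index: dropped by this patch */
};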
@@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
        0,
        {
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 0, 12, 0},
+                       5400, 0, 12},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800,  1, 18, 0},
+                       7800,  1, 18},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 2, 24, 2},
+                       10000, 2, 24},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 3, 36, 2},
+                       13900, 3, 36},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 4, 48, 4},
+                       17300, 4, 48},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 5, 72, 4},
+                       23000, 5, 72},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 6, 96, 4},
+                       27400, 6, 96},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 7, 108, 4},
+                       29300, 7, 108},
        },
        50,  /* probe interval */
        0,   /* Phy rates allowed initially */
@@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
        0,
        {
                { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-                       900, 0, 2, 0},
+                       900, 0, 2},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-                       1900, 1, 4, 1},
+                       1900, 1, 4},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-                       4900, 2, 11, 2},
+                       4900, 2, 11},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-                       8100, 3, 22, 3},
+                       8100, 3, 22},
                { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 4, 12, 4},
+                       5400, 4, 12},
                { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800, 5, 18, 4},
+                       7800, 5, 18},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 6, 24, 6},
+                       10000, 6, 24},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 7, 36, 6},
+                       13900, 7, 36},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 8, 48, 8},
+                       17300, 8, 48},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 9, 72, 8},
+                       23000, 9, 72},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 10, 96, 8},
+                       27400, 10, 96},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 11, 108, 8},
+                       29300, 11, 108},
        },
        50,  /* probe interval */
        0,   /* Phy rates allowed initially */
 };
 
-static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
+static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
                                struct ieee80211_tx_rate *rate)
 {
-       int rix = 0, i = 0;
-       static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+       int rix, i, idx = 0;
 
        if (!(rate->flags & IEEE80211_TX_RC_MCS))
                return rate->idx;
 
-       while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
-               rix++; i++;
+       for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
+               idx = ath_rc_priv->valid_rate_index[i];
+
+               if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
+                   rate_table->info[idx].ratecode == rate->idx)
+                       break;
        }
 
-       rix += rate->idx + rate_table->mcs_start;
+       rix = idx;
 
-       if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
-           (rate->flags & IEEE80211_TX_RC_SHORT_GI))
-               rix = rate_table->info[rix].ht_index;
-       else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-               rix = rate_table->info[rix].sgi_index;
-       else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-               rix = rate_table->info[rix].cw40index;
+       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+               rix++;
 
        return rix;
 }
 
-static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
-                                  struct ath_rate_priv *ath_rc_priv)
+static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u8 i, j, idx, idx_next;
 
        for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
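The rewritten ath_rc_get_rateindex no longer uses the per-entry index columns; it scans the station's valid-rate list for the HT entry whose ratecode matches the reported MCS and, for short GI, steps to the next entry (the tables keep each SGI variant right after its long-GI one). A standalone sketch of that lookup under those assumptions (types are stand-ins):

#include <stdbool.h>
#include <stdint.h>

struct rc_entry { bool is_ht; uint8_t ratecode; };

static int rc_get_rateindex(const struct rc_entry *table,
                            const uint8_t *valid_idx, int nvalid,
                            uint8_t mcs, bool short_gi)
{
        int i, idx = 0;

        /* Find the valid HT entry carrying this MCS ratecode. */
        for (i = 0; i < nvalid; i++) {
                idx = valid_idx[i];
                if (table[idx].is_ht && table[idx].ratecode == mcs)
                        break;
        }

        /* SGI rates sit immediately after their long-GI counterparts. */
        return short_gi ? idx + 1 : idx;
}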
@@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
        }
 }
 
-static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
-{
-       u8 i;
-
-       for (i = 0; i < ath_rc_priv->rate_table_size; i++)
-               ath_rc_priv->valid_rate_index[i] = 0;
-}
-
-static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
-                                          u8 index, int valid_tx_rate)
-{
-       BUG_ON(index > ath_rc_priv->rate_table_size);
-       ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
-}
-
 static inline
 int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
                                struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
 }
 
 static inline int
-ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
-                    struct ath_rate_priv *ath_rc_priv,
+ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
                     u8 cur_valid_txrate, u8 *next_idx)
 {
        int8_t i;
@@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
        return 0;
 }
 
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
-                                const struct ath_rate_table *rate_table,
-                                u32 capflag)
+static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u8 i, hi = 0;
 
        for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
                        u32 phy = rate_table->info[i].phy;
                        u8 valid_rate_count = 0;
 
-                       if (!ath_rc_valid_phyrate(phy, capflag, 0))
+                       if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
                                continue;
 
                        valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
 
                        ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
                        ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
+                       ath_rc_priv->valid_rate_index[i] = true;
                        hi = i;
                }
        }
@@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
        return hi;
 }
 
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
-                               const struct ath_rate_table *rate_table,
-                               struct ath_rateset *rateset,
-                               u32 capflag)
+static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
+                                      u32 phy, u32 capflag)
 {
-       u8 i, j, hi = 0;
+       if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
+               return false;
 
-       /* Use intersection of working rates and valid rates */
-       for (i = 0; i < rateset->rs_nrates; i++) {
-               for (j = 0; j < rate_table->rate_cnt; j++) {
-                       u32 phy = rate_table->info[j].phy;
-                       u16 rate_flags = rate_table->info[j].rate_flags;
-                       u8 rate = rateset->rs_rates[i];
-                       u8 dot11rate = rate_table->info[j].dot11rate;
-
-                       /* We allow a rate only if its valid and the
-                        * capflag matches one of the validity
-                        * (VALID/VALID_20/VALID_40) flags */
-
-                       if ((rate == dot11rate) &&
-                           (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
-                           WLAN_RC_CAP_MODE(capflag) &&
-                           (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
-                           !WLAN_RC_PHY_HT(phy)) {
-                               u8 valid_rate_count = 0;
-
-                               if (!ath_rc_valid_phyrate(phy, capflag, 0))
-                                       continue;
-
-                               valid_rate_count =
-                                       ath_rc_priv->valid_phy_ratecnt[phy];
-
-                               ath_rc_priv->valid_phy_rateidx[phy]
-                                       [valid_rate_count] = j;
-                               ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                               ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
-                               hi = max(hi, j);
-                       }
-               }
-       }
+       if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
+               return false;
 
-       return hi;
+       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+               return false;
+
+       return true;
 }
 
-static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
-                                 const struct ath_rate_table *rate_table,
-                                 struct ath_rateset *rateset, u32 capflag)
+static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
+                                  u32 phy, u32 capflag)
 {
-       u8 i, j, hi = 0;
+       if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
+               return false;
+
+       if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+               return false;
+
+       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+               return false;
+
+       return true;
+}
+
+static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
+{
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+       struct ath_rateset *rateset;
+       u32 phy, capflag = ath_rc_priv->ht_cap;
+       u16 rate_flags;
+       u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
+
+       if (legacy)
+               rateset = &ath_rc_priv->neg_rates;
+       else
+               rateset = &ath_rc_priv->neg_ht_rates;
 
-       /* Use intersection of working rates and valid rates */
        for (i = 0; i < rateset->rs_nrates; i++) {
                for (j = 0; j < rate_table->rate_cnt; j++) {
-                       u32 phy = rate_table->info[j].phy;
-                       u16 rate_flags = rate_table->info[j].rate_flags;
-                       u8 rate = rateset->rs_rates[i];
-                       u8 dot11rate = rate_table->info[j].dot11rate;
-
-                       if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
-                           !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) ||
-                           !WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+                       phy = rate_table->info[j].phy;
+                       rate_flags = rate_table->info[j].rate_flags;
+                       rate = rateset->rs_rates[i];
+                       dot11rate = rate_table->info[j].dot11rate;
+
+                       if (legacy &&
+                           !ath_rc_check_legacy(rate, dot11rate,
+                                                rate_flags, phy, capflag))
+                               continue;
+
+                       if (!legacy &&
+                           !ath_rc_check_ht(rate, dot11rate,
+                                            rate_flags, phy, capflag))
                                continue;
 
                        if (!ath_rc_valid_phyrate(phy, capflag, 0))
                                continue;
 
-                       ath_rc_priv->valid_phy_rateidx[phy]
-                               [ath_rc_priv->valid_phy_ratecnt[phy]] = j;
+                       valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
+                       ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
                        ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
+                       ath_rc_priv->valid_rate_index[j] = true;
                        hi = max(hi, j);
                }
        }
@@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
        return hi;
 }
 
-/* Finds the highest rate index we can use */
-static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
-                                struct ath_rate_priv *ath_rc_priv,
-                                const struct ath_rate_table *rate_table,
-                                int *is_probing,
-                                bool legacy)
+static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
+                                int *is_probing)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u32 best_thruput, this_thruput, now_msec;
        u8 rate, next_rate, best_rate, maxindex, minindex;
        int8_t index = 0;
@@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
                u8 per_thres;
 
                rate = ath_rc_priv->valid_rate_index[index];
-               if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
-                       continue;
                if (rate > ath_rc_priv->rate_max_phy)
                        continue;
 
@@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
        rate->count = tries;
        rate->idx = rate_table->info[rix].ratecode;
 
-       if (txrc->short_preamble)
-               rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
        if (txrc->rts || rtsctsenable)
                rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
 
@@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
                                   const struct ath_rate_table *rate_table,
                                   struct ieee80211_tx_info *tx_info)
 {
-       struct ieee80211_tx_rate *rates = tx_info->control.rates;
-       int i = 0, rix = 0, cix, enable_g_protection = 0;
+       struct ieee80211_bss_conf *bss_conf;
 
-       /* get the cix for the lowest valid rix */
-       for (i = 3; i >= 0; i--) {
-               if (rates[i].count && (rates[i].idx >= 0)) {
-                       rix = ath_rc_get_rateindex(rate_table, &rates[i]);
-                       break;
-               }
-       }
-       cix = rate_table->info[rix].ctrl_rate;
+       if (!tx_info->control.vif)
+               return;
+       /*
+        * For legacy frames, mac80211 takes care of CTS protection.
+        */
+       if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
+               return;
 
-       /* All protection frames are transmited at 2Mb/s for 802.11g,
-        * otherwise we transmit them at 1Mb/s */
-       if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
-           !conf_is_ht(&sc->hw->conf))
-               enable_g_protection = 1;
+       bss_conf = &tx_info->control.vif->bss_conf;
+
+       if (!bss_conf->basic_rates)
+               return;
 
        /*
-        * If 802.11g protection is enabled, determine whether to use RTS/CTS or
-        * just CTS.  Note that this is only done for OFDM/HT unicast frames.
+        * For now, use the lowest allowed basic rate for HT frames.
         */
-       if ((tx_info->control.vif &&
-            tx_info->control.vif->bss_conf.use_cts_prot) &&
-           (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
-            WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
-               rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
-               cix = rate_table->info[enable_g_protection].ctrl_rate;
-       }
-
-       tx_info->control.rts_cts_rate_idx = cix;
+       tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
 }
 
 static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
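Protection-rate selection for MCS frames is now simply the lowest bit set in the negotiated basic-rate bitmap; legacy frames are left to mac80211. The patch uses __ffs() for this; a portable sketch of the same choice:

static int lowest_basic_rate_idx(unsigned long basic_rates)
{
        int idx = 0;

        if (!basic_rates)
                return -1;      /* no basic rates: caller bails out, as above */

        while (!(basic_rates & 1UL)) {
                basic_rates >>= 1;
                idx++;
        }

        return idx;
}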
@@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        try_per_rate = 4;
 
        rate_table = ath_rc_priv->rate_table;
-       rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-                                    &is_probe, false);
+       rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
 
-       /*
-        * If we're in HT mode and both us and our peer supports LDPC.
-        * We don't need to check our own device's capabilities as our own
-        * ht capabilities would have already been intersected with our peer's.
-        */
        if (conf_is_ht(&sc->hw->conf) &&
            (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
                tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
 
        if (is_probe) {
-               /* set one try for probe rates. For the
-                * probes don't enable rts */
+               /*
+                * Set one try for probe rates. For the
+                * probes, don't enable RTS.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       1, rix, 0);
-
-               /* Get the next tried/allowed rate. No RTS for the next series
-                * after the probe rate
+               /*
+                * Get the next tried/allowed rate.
+                * No RTS for the next series after the probe rate.
                 */
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
 
                tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
-               /* Set the chosen rate. No RTS for first series entry. */
+               /*
+                * Set the chosen rate. No RTS for first series entry.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
        }
 
-       /* Fill in the other rates for multirate retry */
-       for ( ; i < 3; i++) {
+       for ( ; i < 4; i++) {
+               /*
+                * Use twice the number of tries for the last MRR segment.
+                */
+               if (i + 1 == 4)
+                       try_per_rate = 8;
+
+               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
 
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-               /* All other rates in the series have RTS enabled */
+               /*
+                * All other rates in the series have RTS enabled.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i], txrc,
                                       try_per_rate, rix, 1);
        }
 
-       /* Use twice the number of tries for the last MRR segment. */
-       try_per_rate = 8;
-
-       /*
-        * If the last rate in the rate series is MCS and has
-        * more than 80% of per thresh, then use a legacy rate
-        * as last retry to ensure that the frame is tried in both
-        * MCS and legacy rate.
-        */
-       ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-       if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
-           (ath_rc_priv->per[rix] > 45))
-               rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-                               &is_probe, true);
-
-       /* All other rates in the series have RTS enabled */
-       ath_rc_rate_set_series(rate_table, &rates[i], txrc,
-                              try_per_rate, rix, 1);
        /*
         * NB:Change rate series to enable aggregation when operating
         * at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                rates[0].count = ATH_TXMAXTRY;
        }
 
-       /* Setup RTS/CTS */
        ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
 }
 
@@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
        stats->per = per;
 }
 
-/* Update PER, RSSI and whatever else that the code thinks it is doing.
-   If you can make sense of all this, you really need to go out more. */
-
 static void ath_rc_update_ht(struct ath_softc *sc,
                             struct ath_rate_priv *ath_rc_priv,
                             struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
        if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
            rate_table->info[tx_rate].ratekbps <=
            rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv,
-                                    (u8)tx_rate, &ath_rc_priv->rate_max_phy);
+               ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
+                                    &ath_rc_priv->rate_max_phy);
 
                /* Don't probe for a little while. */
                ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc,
 
 }
 
+static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+       struct ath_rc_stats *stats;
+
+       stats = &rc->rcstats[final_rate];
+       stats->success++;
+}
 
 static void ath_rc_tx_status(struct ath_softc *sc,
                             struct ath_rate_priv *ath_rc_priv,
-                            struct ieee80211_tx_info *tx_info,
-                            int final_ts_idx, int xretries, int long_retry)
+                            struct sk_buff *skb)
 {
-       const struct ath_rate_table *rate_table;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rates = tx_info->status.rates;
+       struct ieee80211_tx_rate *rate;
+       int final_ts_idx = 0, xretries = 0, long_retry = 0;
        u8 flags;
        u32 i = 0, rix;
 
-       rate_table = ath_rc_priv->rate_table;
+       for (i = 0; i < sc->hw->max_rates; i++) {
+               rate = &tx_info->status.rates[i];
+               if (rate->idx < 0 || !rate->count)
+                       break;
+
+               final_ts_idx = i;
+               long_retry = rate->count - 1;
+       }
+
+       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
+               xretries = 1;
 
        /*
         * If the first rate is not the final index, there
         * are intermediate rate failures to be processed.
         */
        if (final_ts_idx != 0) {
-               /* Process intermediate rates that failed.*/
                for (i = 0; i < final_ts_idx ; i++) {
                        if (rates[i].count != 0 && (rates[i].idx >= 0)) {
                                flags = rates[i].flags;
@@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc,
                                    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
                                        return;
 
-                               rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+                               rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
                                ath_rc_update_ht(sc, ath_rc_priv, tx_info,
-                                               rix, xretries ? 1 : 2,
-                                               rates[i].count);
+                                                rix, xretries ? 1 : 2,
+                                                rates[i].count);
                        }
                }
-       } else {
-               /*
-                * Handle the special case of MIMO PS burst, where the second
-                * aggregate is sent out with only one rate and one try.
-                * Treating it as an excessive retry penalizes the rate
-                * inordinately.
-                */
-               if (rates[0].count == 1 && xretries == 1)
-                       xretries = 2;
        }
 
-       flags = rates[i].flags;
+       flags = rates[final_ts_idx].flags;
 
        /* If HT40 and we have switched mode from 40 to 20 => don't update */
        if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
            !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
                return;
 
-       rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+       rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
        ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
+       ath_debug_stat_rc(ath_rc_priv, rix);
 }
 
 static const
@@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
                                             enum ieee80211_band band,
                                             bool is_ht)
 {
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
        switch(band) {
        case IEEE80211_BAND_2GHZ:
                if (is_ht)
@@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
                        return &ar5416_11na_ratetable;
                return &ar5416_11a_ratetable;
        default:
-               ath_dbg(common, CONFIG, "Invalid band\n");
                return NULL;
        }
 }
 
 static void ath_rc_init(struct ath_softc *sc,
-                       struct ath_rate_priv *ath_rc_priv,
-                       struct ieee80211_supported_band *sband,
-                       struct ieee80211_sta *sta,
-                       const struct ath_rate_table *rate_table)
+                       struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
        u8 i, j, k, hi = 0, hthi = 0;
 
-       /* Initial rate table size. Will change depending
-        * on the working rate set */
        ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
 
-       /* Initialize thresholds according to the global rate table */
        for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
                ath_rc_priv->per[i] = 0;
+               ath_rc_priv->valid_rate_index[i] = 0;
        }
 
-       /* Determine the valid rates */
-       ath_rc_init_valid_rate_idx(ath_rc_priv);
-
        for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
                for (j = 0; j < RATE_TABLE_SIZE; j++)
                        ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc,
        }
 
        if (!rateset->rs_nrates) {
-               /* No working rate, just initialize valid rates */
-               hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
-                                           ath_rc_priv->ht_cap);
+               hi = ath_rc_init_validrates(ath_rc_priv);
        } else {
-               /* Use intersection of working rates and valid rates */
-               hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
-                                          rateset, ath_rc_priv->ht_cap);
-               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
-                       hthi = ath_rc_setvalid_htrates(ath_rc_priv,
-                                                      rate_table,
-                                                      ht_mcs,
-                                                      ath_rc_priv->ht_cap);
-               }
+               hi = ath_rc_setvalid_rates(ath_rc_priv, true);
+
+               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
+                       hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
+
                hi = max(hi, hthi);
        }
 
        ath_rc_priv->rate_table_size = hi + 1;
        ath_rc_priv->rate_max_phy = 0;
-       BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
 
        for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
                for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc,
                                ath_rc_priv->valid_phy_rateidx[i][j];
                }
 
-               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1)
-                   || !ath_rc_priv->valid_phy_ratecnt[i])
+               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
+                   !ath_rc_priv->valid_phy_ratecnt[i])
                        continue;
 
                ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
        }
-       BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-       BUG_ON(k > RATE_TABLE_SIZE);
+       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+       WARN_ON(k > RATE_TABLE_SIZE);
 
        ath_rc_priv->max_valid_rate = k;
-       ath_rc_sort_validrates(rate_table, ath_rc_priv);
+       ath_rc_sort_validrates(ath_rc_priv);
        ath_rc_priv->rate_max_phy = (k > 4) ?
-                                       ath_rc_priv->valid_rate_index[k-4] :
-                                       ath_rc_priv->valid_rate_index[k-1];
-       ath_rc_priv->rate_table = rate_table;
+               ath_rc_priv->valid_rate_index[k-4] :
+               ath_rc_priv->valid_rate_index[k-1];
 
        ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
                ath_rc_priv->ht_cap);
 }
 
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
-                              bool is_cw40, bool is_sgi)
+static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
        u8 caps = 0;
 
@@ -1289,9 +1222,10 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
                        caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
                else if (sta->ht_cap.mcs.rx_mask[1])
                        caps |= WLAN_RC_DS_FLAG;
-               if (is_cw40)
+               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
                        caps |= WLAN_RC_40_FLAG;
-               if (is_sgi)
+               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40 ||
+                   sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
                        caps |= WLAN_RC_SGI_FLAG;
        }
 
@@ -1319,15 +1253,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
 /* mac80211 Rate Control callbacks */
 /***********************************/
 
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-       struct ath_rc_stats *stats;
-
-       stats = &rc->rcstats[final_rate];
-       stats->success++;
-}
-
-
 static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
                          struct ieee80211_sta *sta, void *priv_sta,
                          struct sk_buff *skb)
@@ -1335,22 +1260,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
        struct ath_softc *sc = priv;
        struct ath_rate_priv *ath_rc_priv = priv_sta;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr;
-       int final_ts_idx = 0, tx_status = 0;
-       int long_retry = 0;
-       __le16 fc;
-       int i;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-       fc = hdr->frame_control;
-       for (i = 0; i < sc->hw->max_rates; i++) {
-               struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
-               if (rate->idx < 0 || !rate->count)
-                       break;
-
-               final_ts_idx = i;
-               long_retry = rate->count - 1;
-       }
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       __le16 fc = hdr->frame_control;
 
        if (!priv_sta || !ieee80211_is_data(fc))
                return;
@@ -1363,11 +1274,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
        if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
                return;
 
-       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
-               tx_status = 1;
-
-       ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
-                        long_retry);
+       ath_rc_tx_status(sc, ath_rc_priv, skb);
 
        /* Check if aggregation has to be enabled for this tid */
        if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1290,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
                                ieee80211_start_tx_ba_session(sta, tid, 0);
                }
        }
-
-       ath_debug_stat_rc(ath_rc_priv,
-               ath_rc_get_rateindex(ath_rc_priv->rate_table,
-                       &tx_info->status.rates[final_ts_idx]));
 }
 
 static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                           struct ieee80211_sta *sta, void *priv_sta)
 {
        struct ath_softc *sc = priv;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table;
-       bool is_cw40, is_sgi = false;
        int i, j = 0;
 
        for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1319,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                ath_rc_priv->neg_ht_rates.rs_nrates = j;
        }
 
-       is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
-       if (is_cw40)
-               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
-       else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-
-       /* Choose rate table first */
-
-       rate_table = ath_choose_rate_table(sc, sband->band,
-                             sta->ht_cap.ht_supported);
+       ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
+                                                       sta->ht_cap.ht_supported);
+       if (!ath_rc_priv->rate_table) {
+               ath_err(common, "No rate table chosen\n");
+               return;
+       }
 
-       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
-       ath_rc_init(sc, priv_sta, sband, sta, rate_table);
+       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+       ath_rc_init(sc, priv_sta);
 }
 
 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1336,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
 {
        struct ath_softc *sc = priv;
        struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table = NULL;
-       bool oper_cw40 = false, oper_sgi;
-       bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
-       bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
-
-       /* FIXME: Handle AP mode later when we support CWM */
 
        if (changed & IEEE80211_RC_BW_CHANGED) {
-               if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
-                       return;
+               ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+               ath_rc_init(sc, priv_sta);
 
-               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                       oper_cw40 = true;
-
-               if (oper_cw40)
-                       oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
-                                  true : false;
-               else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-                       oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
-                                  true : false;
-               else
-                       oper_sgi = false;
-
-               if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
-                       rate_table = ath_choose_rate_table(sc, sband->band,
-                                                  sta->ht_cap.ht_supported);
-                       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
-                                                  oper_cw40, oper_sgi);
-                       ath_rc_init(sc, priv_sta, sband, sta, rate_table);
-
-                       ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
-                               "Operating HT Bandwidth changed to: %d\n",
-                               sc->hw->conf.channel_type);
-               }
+               ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
+                       "Operating HT Bandwidth changed to: %d\n",
+                       sc->hw->conf.channel_type);
        }
 }
 
@@ -1484,7 +1355,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
        struct ath_rate_priv *rc = file->private_data;
        char *buf;
        unsigned int len = 0, max;
-       int i = 0;
+       int rix;
        ssize_t retval;
 
        if (rc->rate_table == NULL)
@@ -1500,7 +1371,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
                       "HT", "MCS", "Rate",
                       "Success", "Retries", "XRetries", "PER");
 
-       for (i = 0; i < rc->rate_table_size; i++) {
+       for (rix = 0; rix < rc->max_valid_rate; rix++) {
+               u8 i = rc->valid_rate_index[rix];
                u32 ratekbps = rc->rate_table->info[i].ratekbps;
                struct ath_rc_stats *stats = &rc->rcstats[i];
                char mcs[5];
index 75f8e9b..268e67d 100644
@@ -160,10 +160,6 @@ struct ath_rate_table {
                u32 user_ratekbps;
                u8 ratecode;
                u8 dot11rate;
-               u8 ctrl_rate;
-               u8 cw40index;
-               u8 sgi_index;
-               u8 ht_index;
        } info[RATE_TABLE_SIZE];
        u32 probe_interval;
        u8 initial_ratemax;
index 2c9da6b..ef91f6c 100644
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
        TX_STAT_INC(txq->axq_qnum, queued);
 }
 
-static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
+static void setup_frame_info(struct ieee80211_hw *hw,
+                            struct ieee80211_sta *sta,
+                            struct sk_buff *skb,
                             int framelen)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        const struct ieee80211_rate *rate;
@@ -1935,7 +1936,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
+       struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
        struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1980,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
            !ieee80211_is_data(hdr->frame_control))
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 
-       setup_frame_info(hw, skb, frmlen);
+       setup_frame_info(hw, sta, skb, frmlen);
 
        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
index 376be11..2aa4a59 100644
@@ -425,6 +425,7 @@ struct ar9170 {
        bool rx_has_plcp;
        struct sk_buff *rx_failover;
        int rx_failover_missing;
+       u32 ampdu_ref;
 
        /* FIFO for collecting outstanding BlockAckRequest */
        struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 
 /* TX */
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control,
+                   struct sk_buff *skb);
 void carl9170_tx_janitor(struct work_struct *work);
 void carl9170_tx_process_status(struct ar9170 *ar,
                                const struct carl9170_rsp *cmd);
index c5ca6f1..24ac287 100644
@@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                if (SUPP(CARL9170FW_WLANTX_CAB)) {
                        if_comb_types |=
                                BIT(NL80211_IFTYPE_AP) |
+                               BIT(NL80211_IFTYPE_MESH_POINT) |
                                BIT(NL80211_IFTYPE_P2P_GO);
                }
        }
index 53415bf..f867628 100644
@@ -318,10 +318,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
                bssid = common->curbssid;
 
                switch (vif->type) {
-               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_ADHOC:
                        cam_mode |= AR9170_MAC_CAM_IBSS;
                        break;
+               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_AP:
                        cam_mode |= AR9170_MAC_CAM_AP;
 
index 858e58d..18554ab 100644
@@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
 
                        goto unlock;
 
+               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_AP:
                        if ((vif->type == NL80211_IFTYPE_STATION) ||
                            (vif->type == NL80211_IFTYPE_WDS) ||
-                           (vif->type == NL80211_IFTYPE_AP))
+                           (vif->type == NL80211_IFTYPE_AP) ||
+                           (vif->type == NL80211_IFTYPE_MESH_POINT))
                                break;
 
                        err = -EBUSY;
index 6f6a341..a0b7230 100644
@@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_MESH_POINT:
                        carl9170_update_beacon(ar, true);
                        break;
 
@@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
 #undef TID_CHECK
 }
 
-static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
+static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
+                                struct ieee80211_rx_status *rx_status)
 {
        __le16 fc;
 
@@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
                return true;
        }
 
+       rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+       rx_status->ampdu_reference = ar->ampdu_ref;
+
        /*
         * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
         * certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
        if (unlikely(len < sizeof(*mac)))
                goto drop;
 
+       memset(&status, 0, sizeof(status));
+
        mpdu_len = len - sizeof(*mac);
 
        mac = (void *)(buf + mpdu_len);
        mac_status = mac->status;
        switch (mac_status & AR9170_RX_STATUS_MPDU) {
        case AR9170_RX_STATUS_MPDU_FIRST:
+               ar->ampdu_ref++;
                /* Aggregated MPDUs start with an PLCP header */
                if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
                        head = (void *) buf;
@@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
                break;
 
        case AR9170_RX_STATUS_MPDU_LAST:
+               status.flag |= RX_FLAG_AMPDU_IS_LAST;
+
                /*
                 * The last frame of an A-MPDU has an extra tail
                 * which does contain the phy status of the whole
                 * aggregate.
                 */
-
                if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
                        mpdu_len -= sizeof(struct ar9170_rx_phystatus);
                        phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
        if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
                goto drop;
 
-       memset(&status, 0, sizeof(status));
        if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
                goto drop;
 
-       if (!carl9170_ampdu_check(ar, buf, mac_status))
+       if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
                goto drop;
 
        if (phy)
index 6a86814..84377cf 100644
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
        return false;
 }
 
-static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
+static int carl9170_tx_prepare(struct ar9170 *ar,
+                              struct ieee80211_sta *sta,
+                              struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct _carl9170_tx_superframe *txc;
        struct carl9170_vif_info *cvif;
        struct ieee80211_tx_info *info;
        struct ieee80211_tx_rate *txrate;
-       struct ieee80211_sta *sta;
        struct carl9170_tx_info *arinfo;
        unsigned int hw_queue;
        int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
        else
                cvif = NULL;
 
-       sta = info->control.sta;
-
        txc = (void *)skb_push(skb, sizeof(*txc));
        memset(txc, 0, sizeof(*txc));
 
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
        return false;
 }
 
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control,
+                   struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
-       struct ieee80211_sta *sta;
+       struct ieee80211_sta *sta = control->sta;
        bool run;
 
        if (unlikely(!IS_STARTED(ar)))
                goto err_free;
 
        info = IEEE80211_SKB_CB(skb);
-       sta = info->control.sta;
 
-       if (unlikely(carl9170_tx_prepare(ar, skb)))
+       if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
                goto err_free;
 
        carl9170_tx_accounting(ar, skb);
index 4648bbf..098fe9e 100644
@@ -4,6 +4,7 @@ b43-y                           += tables.o
 b43-$(CONFIG_B43_PHY_N)                += tables_nphy.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2055.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2056.o
+b43-$(CONFIG_B43_PHY_N)                += radio_2057.o
 b43-y                          += phy_common.o
 b43-y                          += phy_g.o
 b43-y                          += phy_a.o
index 7c899fc..b298e5d 100644
@@ -241,16 +241,18 @@ enum {
 #define B43_SHM_SH_PHYVER              0x0050  /* PHY version */
 #define B43_SHM_SH_PHYTYPE             0x0052  /* PHY type */
 #define B43_SHM_SH_ANTSWAP             0x005C  /* Antenna swap threshold */
-#define B43_SHM_SH_HOSTFLO             0x005E  /* Hostflags for ucode options (low) */
-#define B43_SHM_SH_HOSTFMI             0x0060  /* Hostflags for ucode options (middle) */
-#define B43_SHM_SH_HOSTFHI             0x0062  /* Hostflags for ucode options (high) */
+#define B43_SHM_SH_HOSTF1              0x005E  /* Hostflags 1 for ucode options */
+#define B43_SHM_SH_HOSTF2              0x0060  /* Hostflags 2 for ucode options */
+#define B43_SHM_SH_HOSTF3              0x0062  /* Hostflags 3 for ucode options */
 #define B43_SHM_SH_RFATT               0x0064  /* Current radio attenuation value */
 #define B43_SHM_SH_RADAR               0x0066  /* Radar register */
 #define B43_SHM_SH_PHYTXNOI            0x006E  /* PHY noise directly after TX (lower 8bit only) */
 #define B43_SHM_SH_RFRXSP1             0x0072  /* RF RX SP Register 1 */
+#define B43_SHM_SH_HOSTF4              0x0078  /* Hostflags 4 for ucode options */
 #define B43_SHM_SH_CHAN                        0x00A0  /* Current channel (low 8bit only) */
 #define  B43_SHM_SH_CHAN_5GHZ          0x0100  /* Bit set, if 5 Ghz channel */
 #define  B43_SHM_SH_CHAN_40MHZ         0x0200  /* Bit set, if 40 Mhz channel width */
+#define B43_SHM_SH_HOSTF5              0x00D4  /* Hostflags 5 for ucode options */
 #define B43_SHM_SH_BCMCFIFOID          0x0108  /* Last posted cookie to the bcast/mcast FIFO */
 /* TSSI information */
 #define B43_SHM_SH_TSSI_CCK            0x0058  /* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@ enum {
 #define B43_PHYTYPE_HT                 0x07
 #define B43_PHYTYPE_LCN                        0x08
 #define B43_PHYTYPE_LCNXN              0x09
+#define B43_PHYTYPE_LCN40              0x0a
+#define B43_PHYTYPE_AC                 0x0b
 
 /* PHYRegisters */
 #define B43_PHY_ILT_A_CTRL             0x0072
index a140165..73730e9 100644
@@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev)
 {
        u64 ret;
 
-       ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI);
+       ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
        ret <<= 16;
-       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI);
+       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
        ret <<= 16;
-       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO);
+       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
 
        return ret;
 }
@@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
        lo = (value & 0x00000000FFFFULL);
        mi = (value & 0x0000FFFF0000ULL) >> 16;
        hi = (value & 0xFFFF00000000ULL) >> 32;
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo);
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi);
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
 }
 
 /* Read the firmware capabilities bitmask (Opensource firmware only) */
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
 }
 
 static void b43_op_tx(struct ieee80211_hw *hw,
-                    struct sk_buff *skb)
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
 
@@ -4282,6 +4283,35 @@ out:
        return err;
 }
 
+static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
+{
+       switch (phy_type) {
+       case B43_PHYTYPE_A:
+               return "A";
+       case B43_PHYTYPE_B:
+               return "B";
+       case B43_PHYTYPE_G:
+               return "G";
+       case B43_PHYTYPE_N:
+               return "N";
+       case B43_PHYTYPE_LP:
+               return "LP";
+       case B43_PHYTYPE_SSLPN:
+               return "SSLPN";
+       case B43_PHYTYPE_HT:
+               return "HT";
+       case B43_PHYTYPE_LCN:
+               return "LCN";
+       case B43_PHYTYPE_LCNXN:
+               return "LCNXN";
+       case B43_PHYTYPE_LCN40:
+               return "LCN40";
+       case B43_PHYTYPE_AC:
+               return "AC";
+       }
+       return "UNKNOWN";
+}
+
 /* Get PHY and RADIO versioning numbers */
 static int b43_phy_versioning(struct b43_wldev *dev)
 {
@@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev)
                unsupported = 1;
        }
        if (unsupported) {
-               b43err(dev->wl, "FOUND UNSUPPORTED PHY "
-                      "(Analog %u, Type %u, Revision %u)\n",
-                      analog_type, phy_type, phy_rev);
+               b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
+                      analog_type, phy_type, b43_phy_name(dev, phy_type),
+                      phy_rev);
                return -EOPNOTSUPP;
        }
-       b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n",
-              analog_type, phy_type, phy_rev);
+       b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
+               analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
 
        /* Get RADIO versioning */
        if (dev->dev->core_rev >= 24) {
index 3f8883b..f01676a 100644
@@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
                          (b43_radio_read16(dev, offset) & mask) | set);
 }
 
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+                         u16 value, int delay, int timeout)
+{
+       u16 val;
+       int i;
+
+       for (i = 0; i < timeout; i += delay) {
+               val = b43_radio_read(dev, offset);
+               if ((val & mask) == value)
+                       return true;
+               udelay(delay);
+       }
+       return false;
+}
+
 u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
 {
        assert_mac_suspended(dev);
@@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
        average = (a + b + c + d + 2) / 4;
        if (is_ofdm) {
                /* Adjust for CCK-boost */
-               if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
+               if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
                    & B43_HF_CCKBOOST)
                        average = (average >= 13) ? (average - 13) : 0;
        }
index 9233b13..f1b9993 100644
@@ -365,6 +365,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
 void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
 
 /**
+ * b43_radio_wait_value - Wait for a masked register read to match a given value
+ */
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+                         u16 value, int delay, int timeout);
+
+/**
  * b43_radio_lock - Lock firmware radio register access
  */
 void b43_radio_lock(struct b43_wldev *dev);
index b92bb9c..3c35382 100644
@@ -32,6 +32,7 @@
 #include "tables_nphy.h"
 #include "radio_2055.h"
 #include "radio_2056.h"
+#include "radio_2057.h"
 #include "main.h"
 
 struct nphy_txgains {
@@ -126,6 +127,46 @@ ok:
        b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
+static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
+                                             u16 value, u8 core, bool off,
+                                             u8 override)
+{
+       const struct nphy_rf_control_override_rev7 *e;
+       u16 en_addrs[3][2] = {
+               { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
+       };
+       u16 en_addr;
+       u16 en_mask = field;
+       u16 val_addr;
+       u8 i;
+
+       /* Remember: we can get NULL! */
+       e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
+
+       for (i = 0; i < 2; i++) {
+               if (override >= ARRAY_SIZE(en_addrs)) {
+                       b43err(dev->wl, "Invalid override value %d\n", override);
+                       return;
+               }
+               en_addr = en_addrs[override][i];
+
+               val_addr = e ? (i ? e->val_addr_core1 : e->val_addr_core0) : 0;
+
+               if (off) {
+                       b43_phy_mask(dev, en_addr, ~en_mask);
+                       if (e) /* Do it safer, better than wl */
+                               b43_phy_mask(dev, val_addr, ~e->val_mask);
+               } else {
+                       if (!core || (core & (1 << i))) {
+                               b43_phy_set(dev, en_addr, en_mask);
+                               if (e)
+                                       b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
+                       }
+               }
+       }
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
 static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
                                                u16 value, u8 core, bool off)
@@ -459,6 +500,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
 }
 
 /**************************************************
+ * Radio 0x2057
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
+static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 tmp;
+
+       if (phy->radio_rev == 5) {
+               b43_phy_mask(dev, 0x342, ~0x2);
+               udelay(10);
+               b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
+               b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
+       }
+
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
+       udelay(10);
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
+               b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+               return 0;
+       }
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
+       tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
+
+       if (phy->radio_rev == 5) {
+               b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
+               b43_radio_mask(dev, 0x1ca, ~0x2);
+       }
+       if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+               b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
+               b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
+                                 tmp << 2);
+       }
+
+       return tmp & 0x3e;
+}
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
+static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+                       phy->radio_rev == 6);
+       u16 tmp;
+
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x61);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
+       }
+       b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000))
+               b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x69);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
+       }
+       b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000))
+               b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
+               b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x73);
+               b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
+       }
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000)) {
+               b43err(dev->wl, "Radio 0x2057 rccal timeout\n");
+               return 0;
+       }
+       tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       return tmp;
+}
+
+static void b43_radio_2057_init_pre(struct b43_wldev *dev)
+{
+       b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+       /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+       b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
+       b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+       b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
+}
+
+static void b43_radio_2057_init_post(struct b43_wldev *dev)
+{
+       b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
+
+       b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
+       b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
+       mdelay(2);
+       b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
+       b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
+
+       if (dev->phy.n->init_por) {
+               b43_radio_2057_rcal(dev);
+               b43_radio_2057_rccal(dev);
+       }
+       b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
+
+       dev->phy.n->init_por = false;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
+static void b43_radio_2057_init(struct b43_wldev *dev)
+{
+       b43_radio_2057_init_pre(dev);
+       r2057_upload_inittabs(dev);
+       b43_radio_2057_init_post(dev);
+}
+
+/**************************************************
  * Radio 0x2056
  **************************************************/
 
@@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        enum ieee80211_band band = b43_current_band(dev->wl);
        u16 offset;
        u8 i;
-       u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+       u16 bias, cbias;
+       u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
+       u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
 
        B43_WARN_ON(dev->phy.rev < 3);
 
@@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
                }
        } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
-               /* TODO */
+               u16 freq = dev->phy.channel_freq;
+               if (freq < 5100) {
+                       paa_boost = 0xA;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xF;
+                       mixa_boost = 0xF;
+               } else if (freq < 5340) {
+                       paa_boost = 0x8;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xFB;
+                       mixa_boost = 0xF;
+               } else if (freq < 5650) {
+                       paa_boost = 0x0;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xB;
+                       mixa_boost = 0xF;
+               } else {
+                       paa_boost = 0x0;
+                       pada_boost = 0x77;
+                       if (freq != 5825)
+                               pgaa_boost = -(freq - 18) / 36 + 168;
+                       else
+                               pgaa_boost = 6;
+                       mixa_boost = 0xF;
+               }
+
+               for (i = 0; i < 2; i++) {
+                       offset = i ? B2056_TX1 : B2056_TX0;
+
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_TXSPARE1, 0x30);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PA_SPARE2, 0xee);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PADA_CASCBIAS, 0x03);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+               }
        }
 
        udelay(50);
@@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        udelay(300);
 }
 
+static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 mast2, tmp;
+
+       if (phy->rev != 3)
+               return 0;
+
+       mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
+       b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
+
+       udelay(10);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+       udelay(10);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
+
+       if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
+                                 1000000)) {
+               b43err(dev->wl, "Radio recalibration timeout\n");
+               return 0;
+       }
+
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+       tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
+
+       b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
+
+       return tmp & 0x1f;
+}
+
 static void b43_radio_init2056_pre(struct b43_wldev *dev)
 {
        b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
        b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
        b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
        b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
-       /*
-       if (nphy->init_por)
-               Call Radio 2056 Recalibrate
-       */
+       if (dev->phy.n->init_por)
+               b43_radio_2056_rcal(dev);
 }
 
 /*
@@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev)
        b43_radio_init2056_pre(dev);
        b2056_upload_inittabs(dev, 0, 0);
        b43_radio_init2056_post(dev);
+
+       dev->phy.n->init_por = false;
 }
 
 /**************************************************
@@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
-       int i;
-       u16 val;
        bool workaround = false;
 
        if (sprom->revision < 4)
@@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
        b43_radio_set(dev, B2055_CAL_MISC, 0x1);
        msleep(1);
        b43_radio_set(dev, B2055_CAL_MISC, 0x40);
-       for (i = 0; i < 200; i++) {
-               val = b43_radio_read(dev, B2055_CAL_COUT2);
-               if (val & 0x80) {
-                       i = 0;
-                       break;
-               }
-               udelay(10);
-       }
-       if (i)
+       if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
                b43err(dev->wl, "radio post init timeout\n");
        b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
        b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 {
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 7)
+               ; /* TODO */
+       else if (dev->phy.rev >= 3)
                b43_nphy_gain_ctl_workarounds_rev3plus(dev);
        else
                b43_nphy_gain_ctl_workarounds_rev1_2(dev);
 }
 
+/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
+static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
+{
+       if (!offset)
+               offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
+       return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
+}
+
+static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
+{
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
+       struct b43_phy *phy = &dev->phy;
+
+       u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+                                       0x1F };
+       u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+
+       u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
+       u8 ntab7_138_146[] = { 0x11, 0x11 };
+       u8 ntab7_133[] = { 0x77, 0x11, 0x11 };
+
+       u16 lpf_20, lpf_40, lpf_11b;
+       u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
+       u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
+       bool rccal_ovrd = false;
+
+       u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
+       u16 bias, conv, filt;
+
+       u32 tmp32;
+       u8 core;
+
+       if (phy->rev == 7) {
+               b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
+       }
+       if (phy->rev <= 8) {
+               b43_phy_write(dev, 0x23F, 0x1B0);
+               b43_phy_write(dev, 0x240, 0x1B0);
+       }
+       if (phy->rev >= 8)
+               b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
+
+       b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
+       b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
+       tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+       tmp32 &= 0xffffff;
+       b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);
+
+       if (b43_nphy_ipa(dev))
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+                               rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+
+       b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
+       b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);
+
+       lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
+       lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
+       lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
+       if (b43_nphy_ipa(dev)) {
+               if ((phy->radio_rev == 5 && phy->is_40mhz) ||
+                   phy->radio_rev == 7 || phy->radio_rev == 8) {
+                       bcap_val = b43_radio_read(dev, 0x16b);
+                       scap_val = b43_radio_read(dev, 0x16a);
+                       scap_val_11b = scap_val;
+                       bcap_val_11b = bcap_val;
+                       if (phy->radio_rev == 5 && phy->is_40mhz) {
+                               scap_val_11n_20 = scap_val;
+                               bcap_val_11n_20 = bcap_val;
+                               scap_val_11n_40 = bcap_val_11n_40 = 0xc;
+                               rccal_ovrd = true;
+                       } else { /* Rev 7/8 */
+                               lpf_20 = 4;
+                               lpf_11b = 1;
+                               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                                       scap_val_11n_20 = 0xc;
+                                       bcap_val_11n_20 = 0xc;
+                                       scap_val_11n_40 = 0xa;
+                                       bcap_val_11n_40 = 0xa;
+                               } else {
+                                       scap_val_11n_20 = 0x14;
+                                       bcap_val_11n_20 = 0x14;
+                                       scap_val_11n_40 = 0xf;
+                                       bcap_val_11n_40 = 0xf;
+                               }
+                               rccal_ovrd = true;
+                       }
+               }
+       } else {
+               if (phy->radio_rev == 5) {
+                       lpf_20 = 1;
+                       lpf_40 = 3;
+                       bcap_val = b43_radio_read(dev, 0x16b);
+                       scap_val = b43_radio_read(dev, 0x16a);
+                       scap_val_11b = scap_val;
+                       bcap_val_11b = bcap_val;
+                       scap_val_11n_20 = 0x11;
+                       scap_val_11n_40 = 0x11;
+                       bcap_val_11n_20 = 0x13;
+                       bcap_val_11n_40 = 0x13;
+                       rccal_ovrd = true;
+               }
+       }
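+       /* rx2tx LUT word layout: bcap in bits 15:8, scap in 7:3, lpf in 2:0 */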
+       if (rccal_ovrd) {
+               rx2tx_lut_20_11b = (bcap_val_11b << 8) |
+                                  (scap_val_11b << 3) |
+                                  lpf_11b;
+               rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
+                                  (scap_val_11n_20 << 3) |
+                                  lpf_20;
+               rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
+                                  (scap_val_11n_40 << 3) |
+                                  lpf_40;
+               for (core = 0; core < 2; core++) {
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
+                                      rx2tx_lut_20_11b);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
+                                      rx2tx_lut_20_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
+                                      rx2tx_lut_20_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
+                                      rx2tx_lut_40_11n);
+               }
+               b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
+       }
+       b43_phy_write(dev, 0x32F, 0x3);
+       if (phy->radio_rev == 4 || phy->radio_rev == 6)
+               b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);
+
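+       /* Pick iPA bias, converter and filter values; boards with the 3.3V
+        * IPA level shifter (B43_BFH2_IPALVLSHIFT_3P3) use an alternate set */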
+       if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
+               if (sprom->revision &&
+                   sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
+                       b43_radio_write(dev, 0x5, 0x05);
+                       b43_radio_write(dev, 0x6, 0x30);
+                       b43_radio_write(dev, 0x7, 0x00);
+                       b43_radio_set(dev, 0x4f, 0x1);
+                       b43_radio_set(dev, 0xd4, 0x1);
+                       bias = 0x1f;
+                       conv = 0x6f;
+                       filt = 0xaa;
+               } else {
+                       bias = 0x2b;
+                       conv = 0x7f;
+                       filt = 0xee;
+               }
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       for (core = 0; core < 2; core++) {
+                               if (core == 0) {
+                                       b43_radio_write(dev, 0x5F, bias);
+                                       b43_radio_write(dev, 0x64, conv);
+                                       b43_radio_write(dev, 0x66, filt);
+                               } else {
+                                       b43_radio_write(dev, 0xE8, bias);
+                                       b43_radio_write(dev, 0xE9, conv);
+                                       b43_radio_write(dev, 0xEB, filt);
+                               }
+                       }
+               }
+       }
+
+       if (b43_nphy_ipa(dev)) {
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+                           phy->radio_rev == 6) {
+                               for (core = 0; core < 2; core++) {
+                                       if (core == 0)
+                                               b43_radio_write(dev, 0x51,
+                                                               0x7f);
+                                       else
+                                               b43_radio_write(dev, 0xd6,
+                                                               0x7f);
+                               }
+                       }
+                       if (phy->radio_rev == 3) {
+                               for (core = 0; core < 2; core++) {
+                                       if (core == 0) {
+                                               b43_radio_write(dev, 0x64,
+                                                               0x13);
+                                               b43_radio_write(dev, 0x5F,
+                                                               0x1F);
+                                               b43_radio_write(dev, 0x66,
+                                                               0xEE);
+                                               b43_radio_write(dev, 0x59,
+                                                               0x8A);
+                                               b43_radio_write(dev, 0x80,
+                                                               0x3E);
+                                       } else {
+                                               b43_radio_write(dev, 0x69,
+                                                               0x13);
+                                               b43_radio_write(dev, 0xE8,
+                                                               0x1F);
+                                               b43_radio_write(dev, 0xEB,
+                                                               0xEE);
+                                               b43_radio_write(dev, 0xDE,
+                                                               0x8A);
+                                               b43_radio_write(dev, 0x105,
+                                                               0x3E);
+                                       }
+                               }
+                       } else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
+                               if (!phy->is_40mhz) {
+                                       b43_radio_write(dev, 0x5F, 0x14);
+                                       b43_radio_write(dev, 0xE8, 0x12);
+                               } else {
+                                       b43_radio_write(dev, 0x5F, 0x16);
+                                       b43_radio_write(dev, 0xE8, 0x16);
+                               }
+                       }
+               } else {
+                       u16 freq = phy->channel_freq;
+                       if ((freq >= 5180 && freq <= 5230) ||
+                           (freq >= 5745 && freq <= 5805)) {
+                               b43_radio_write(dev, 0x7D, 0xFF);
+                               b43_radio_write(dev, 0xFE, 0xFF);
+                       }
+               }
+       } else {
+               if (phy->radio_rev != 5) {
+                       for (core = 0; core < 2; core++) {
+                               if (core == 0) {
+                                       b43_radio_write(dev, 0x5c, 0x61);
+                                       b43_radio_write(dev, 0x51, 0x70);
+                               } else {
+                                       b43_radio_write(dev, 0xe1, 0x61);
+                                       b43_radio_write(dev, 0xd6, 0x70);
+                               }
+                       }
+               }
+       }
+
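+       /* AFE setup: radio rev 4 writes the 2057 AFE VCM registers directly,
+        * other revs toggle the PHY AFE control overrides instead */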
+       if (phy->radio_rev == 4) {
+               b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+               for (core = 0; core < 2; core++) {
+                       if (core == 0) {
+                               b43_radio_write(dev, 0x1a1, 0x00);
+                               b43_radio_write(dev, 0x1a2, 0x3f);
+                               b43_radio_write(dev, 0x1a6, 0x3f);
+                       } else {
+                               b43_radio_write(dev, 0x1a7, 0x00);
+                               b43_radio_write(dev, 0x1ab, 0x3f);
+                               b43_radio_write(dev, 0x1ac, 0x3f);
+                       }
+               }
+       } else {
+               b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);
+
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
+       }
+
+       b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);
+
+       b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
+
+       if (!phy->is_40mhz) {
+               b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
+       } else {
+               b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
+               b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
+       }
+
+       b43_nphy_gain_ctl_workarounds(dev);
+
+       /* TODO
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
+                           aux_adc_vmid_rev7_core0);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
+                           aux_adc_vmid_rev7_core1);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
+                           aux_adc_gain_rev7);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
+                           aux_adc_gain_rev7);
+       */
+}
+
 static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                        rx2tx_delays[6] = 1;
                        rx2tx_events[7] = 0x1F;
                }
-               b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
                                         ARRAY_SIZE(rx2tx_events));
        }
 
@@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 
        b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
 
-       b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
-       b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+       if (!dev->phy.is_40mhz) {
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+       } else {
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
+       }
 
        b43_nphy_gain_ctl_workarounds(dev);
 
@@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
 
        if (dev->phy.rev == 4 &&
-               b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+           b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
                                0x70);
                b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
                                0x70);
        }
 
+       /* Dropped probably-always-true condition */
        b43_phy_write(dev, 0x224, 0x03eb);
        b43_phy_write(dev, 0x225, 0x03eb);
        b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_phy_write(dev, 0x22d, 0x042b);
        b43_phy_write(dev, 0x22e, 0x0381);
        b43_phy_write(dev, 0x22f, 0x0381);
+
+       if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+               ; /* TODO: 0x0080000000000000 HF */
 }
 
 static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
        u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
 
+       if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
+           dev->dev->board_type == 0x8B) {
+               delays1[0] = 0x1;
+               delays1[5] = 0x14;
+       }
+
        if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
            nphy->band5g_pwrgain) {
                b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
 
        b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
        b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
-       b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
-       b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+       if (dev->phy.rev < 3) {
+               b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+       }
 
        if (dev->phy.rev < 2) {
                b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
        b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
 
-       if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
-           dev->dev->board_type == 0x8B) {
-               delays1[0] = 0x1;
-               delays1[5] = 0x14;
-       }
        b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
        b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
 
@@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
        b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
 
-       b43_phy_mask(dev, B43_NPHY_PIL_DW1,
-                       ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+       if (dev->phy.rev < 3) {
+               b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+                            ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+       }
 
        if (dev->phy.rev == 2)
                b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
        b43_phy_set(dev, B43_NPHY_IQFLIP,
                    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
 
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 7)
+               b43_nphy_workarounds_rev7plus(dev);
+       else if (dev->phy.rev >= 3)
                b43_nphy_workarounds_rev3plus(dev);
        else
                b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
                b43_nphy_ipa_internal_tssi_setup(dev);
 
        if (phy->rev >= 7)
-               ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+               b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
        else if (phy->rev >= 3)
                b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
 
@@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
        b43_nphy_rssi_select(dev, 0, 0);
 
        if (phy->rev >= 7)
-               ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+               b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
        else if (phy->rev >= 3)
                b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
 
@@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
        nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
        nphy->spur_avoid = (phy->rev >= 3) ?
                                B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
+       nphy->init_por = true;
        nphy->gain_boost = true; /* this way we follow wl, assume it is true */
        nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
        nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
                nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
                nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
        }
+
+       nphy->init_por = true;
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
        if (blocked) {
                b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
                                ~B43_NPHY_RFCTL_CMD_CHIP0PU);
-               if (dev->phy.rev >= 3) {
+               if (dev->phy.rev >= 7) {
+                       /* TODO */
+               } else if (dev->phy.rev >= 3) {
                        b43_radio_mask(dev, 0x09, ~0x2);
 
                        b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
                        b43_radio_write(dev, 0x3064, 0);
                }
        } else {
-               if (dev->phy.rev >= 3) {
+               if (dev->phy.rev >= 7) {
+                       b43_radio_2057_init(dev);
+                       b43_switch_channel(dev, dev->phy.channel);
+               } else if (dev->phy.rev >= 3) {
                        b43_radio_init2056(dev);
                        b43_switch_channel(dev, dev->phy.channel);
                } else {
index fd12b38..092c014 100644 (file)
@@ -785,6 +785,7 @@ struct b43_phy_n {
        u16 papd_epsilon_offset[2];
        s32 preamble_override;
        u32 bb_mult_save;
+       bool init_por;
 
        bool gain_boost;
        bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644 (file)
index 0000000..d61d683
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11n 2057 radio device data tables
+
+  Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "radio_2057.h"
+#include "phy_common.h"
+
+static u16 r2057_rev4_init[42][2] = {
+       { 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
+       { 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
+       { 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
+       { 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
+       { 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
+       { 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
+       { 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+       { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+       { 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+       { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+       { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+};
+
+static u16 r2057_rev5_init[44][2] = {
+       { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+       { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+       { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+       { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+       { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+       { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+       { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+       { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
+       { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
+       { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev5a_init[45][2] = {
+       { 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+       { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+       { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+       { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+       { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+       { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
+       { 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
+       { 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+       { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+       { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
+       { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev7_init[54][2] = {
+       { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+       { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+       { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
+       { 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
+       { 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
+       { 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+       { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
+       { 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
+       { 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
+       { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+       { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+       { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+       { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+       { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+static u16 r2057_rev8_init[54][2] = {
+       { 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+       { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+       { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
+       { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
+       { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+       { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
+       { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
+       { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
+       { 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+       { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+       { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+       { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+       { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+void r2057_upload_inittabs(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 *table = NULL;
+       u16 size, i;
+
+       if (phy->rev == 7) {
+               table = r2057_rev4_init[0];
+               size = ARRAY_SIZE(r2057_rev4_init);
+       } else if (phy->rev == 8 || phy->rev == 9) {
+               if (phy->radio_rev == 5) {
+                       if (phy->rev == 8) {
+                               table = r2057_rev5_init[0];
+                               size = ARRAY_SIZE(r2057_rev5_init);
+                       } else {
+                               table = r2057_rev5a_init[0];
+                               size = ARRAY_SIZE(r2057_rev5a_init);
+                       }
+               } else if (phy->radio_rev == 7) {
+                       table = r2057_rev7_init[0];
+                       size = ARRAY_SIZE(r2057_rev7_init);
+               } else if (phy->radio_rev == 9) {
+                       table = r2057_rev8_init[0];
+                       size = ARRAY_SIZE(r2057_rev8_init);
+               }
+       }
+
+       if (table) {
+               for (i = 0; i < size; i++) {
+                       pr_info("radio_write 0x%X ", *table);
+                       table++;
+                       pr_info("0x%X\n", *table);
+                       table++;
+               }
+       }
+}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644 (file)
index 0000000..eeebd8f
--- /dev/null
@@ -0,0 +1,430 @@
+#ifndef B43_RADIO_2057_H_
+#define B43_RADIO_2057_H_
+
+#include <linux/types.h>
+
+#include "tables_nphy.h"
+
+#define R2057_DACBUF_VINCM_CORE0               0x000
+#define R2057_IDCODE                           0x001
+#define R2057_RCCAL_MASTER                     0x002
+#define R2057_RCCAL_CAP_SIZE                   0x003
+#define R2057_RCAL_CONFIG                      0x004
+#define R2057_GPAIO_CONFIG                     0x005
+#define R2057_GPAIO_SEL1                       0x006
+#define R2057_GPAIO_SEL0                       0x007
+#define R2057_CLPO_CONFIG                      0x008
+#define R2057_BANDGAP_CONFIG                   0x009
+#define R2057_BANDGAP_RCAL_TRIM                        0x00a
+#define R2057_AFEREG_CONFIG                    0x00b
+#define R2057_TEMPSENSE_CONFIG                 0x00c
+#define R2057_XTAL_CONFIG1                     0x00d
+#define R2057_XTAL_ICORE_SIZE                  0x00e
+#define R2057_XTAL_BUF_SIZE                    0x00f
+#define R2057_XTAL_PULLCAP_SIZE                        0x010
+#define R2057_RFPLL_MASTER                     0x011
+#define R2057_VCOMONITOR_VTH_L                 0x012
+#define R2057_VCOMONITOR_VTH_H                 0x013
+#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT   0x014
+#define R2057_VCO_VARCSIZE_IDAC                        0x015
+#define R2057_VCOCAL_COUNTVAL0                 0x016
+#define R2057_VCOCAL_COUNTVAL1                 0x017
+#define R2057_VCOCAL_INTCLK_COUNT              0x018
+#define R2057_VCOCAL_MASTER                    0x019
+#define R2057_VCOCAL_NUMCAPCHANGE              0x01a
+#define R2057_VCOCAL_WINSIZE                   0x01b
+#define R2057_VCOCAL_DELAY_AFTER_REFRESH       0x01c
+#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP     0x01d
+#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP      0x01e
+#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP     0x01f
+#define R2057_VCO_FORCECAPEN_FORCECAP1         0x020
+#define R2057_VCO_FORCECAP0                    0x021
+#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE    0x022
+#define R2057_RFPLL_PFD_RESET_PW               0x023
+#define R2057_RFPLL_LOOPFILTER_R2              0x024
+#define R2057_RFPLL_LOOPFILTER_R1              0x025
+#define R2057_RFPLL_LOOPFILTER_C3              0x026
+#define R2057_RFPLL_LOOPFILTER_C2              0x027
+#define R2057_RFPLL_LOOPFILTER_C1              0x028
+#define R2057_CP_KPD_IDAC                      0x029
+#define R2057_RFPLL_IDACS                      0x02a
+#define R2057_RFPLL_MISC_EN                    0x02b
+#define R2057_RFPLL_MMD0                       0x02c
+#define R2057_RFPLL_MMD1                       0x02d
+#define R2057_RFPLL_MISC_CAL_RESETN            0x02e
+#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES     0x02f
+#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE                0x030
+#define R2057_VCOCAL_READCAP0                  0x031
+#define R2057_VCOCAL_READCAP1                  0x032
+#define R2057_VCOCAL_STATUS                    0x033
+#define R2057_LOGEN_PUS                                0x034
+#define R2057_LOGEN_PTAT_RESETS                        0x035
+#define R2057_VCOBUF_IDACS                     0x036
+#define R2057_VCOBUF_TUNE                      0x037
+#define R2057_CMOSBUF_TX2GQ_IDACS              0x038
+#define R2057_CMOSBUF_TX2GI_IDACS              0x039
+#define R2057_CMOSBUF_TX5GQ_IDACS              0x03a
+#define R2057_CMOSBUF_TX5GI_IDACS              0x03b
+#define R2057_CMOSBUF_RX2GQ_IDACS              0x03c
+#define R2057_CMOSBUF_RX2GI_IDACS              0x03d
+#define R2057_CMOSBUF_RX5GQ_IDACS              0x03e
+#define R2057_CMOSBUF_RX5GI_IDACS              0x03f
+#define R2057_LOGEN_MX2G_IDACS                 0x040
+#define R2057_LOGEN_MX2G_TUNE                  0x041
+#define R2057_LOGEN_MX5G_IDACS                 0x042
+#define R2057_LOGEN_MX5G_TUNE                  0x043
+#define R2057_LOGEN_MX5G_RCCR                  0x044
+#define R2057_LOGEN_INDBUF2G_IDAC              0x045
+#define R2057_LOGEN_INDBUF2G_IBOOST            0x046
+#define R2057_LOGEN_INDBUF2G_TUNE              0x047
+#define R2057_LOGEN_INDBUF5G_IDAC              0x048
+#define R2057_LOGEN_INDBUF5G_IBOOST            0x049
+#define R2057_LOGEN_INDBUF5G_TUNE              0x04a
+#define R2057_CMOSBUF_TX_RCCR                  0x04b
+#define R2057_CMOSBUF_RX_RCCR                  0x04c
+#define R2057_LOGEN_SEL_PKDET                  0x04d
+#define R2057_CMOSBUF_SHAREIQ_PTAT             0x04e
+#define R2057_RXTXBIAS_CONFIG_CORE0            0x04f
+#define R2057_TXGM_TXRF_PUS_CORE0              0x050
+#define R2057_TXGM_IDAC_BLEED_CORE0            0x051
+#define R2057_TXGM_GAIN_CORE0                  0x056
+#define R2057_TXGM2G_PKDET_PUS_CORE0           0x057
+#define R2057_PAD2G_PTATS_CORE0                        0x058
+#define R2057_PAD2G_IDACS_CORE0                        0x059
+#define R2057_PAD2G_BOOST_PU_CORE0             0x05a
+#define R2057_PAD2G_CASCV_GAIN_CORE0           0x05b
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0      0x05c
+#define R2057_TXMIX2G_LODC_CORE0               0x05d
+#define R2057_PAD2G_TUNE_PUS_CORE0             0x05e
+#define R2057_IPA2G_GAIN_CORE0                 0x05f
+#define R2057_TSSI2G_SPARE1_CORE0              0x060
+#define R2057_TSSI2G_SPARE2_CORE0              0x061
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0     0x062
+#define R2057_IPA2G_IMAIN_CORE0                        0x063
+#define R2057_IPA2G_CASCONV_CORE0              0x064
+#define R2057_IPA2G_CASCOFFV_CORE0             0x065
+#define R2057_IPA2G_BIAS_FILTER_CORE0          0x066
+#define R2057_TX5G_PKDET_CORE0                 0x069
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE0         0x06a
+#define R2057_PAD5G_PTATS1_CORE0               0x06b
+#define R2057_PAD5G_CLASS_PTATS2_CORE0         0x06c
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0                0x06d
+#define R2057_PAD5G_CASCV_IMAIN_CORE0          0x06e
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0    0x06f
+#define R2057_PGA_BOOST_TUNE_CORE0             0x070
+#define R2057_PGA_GAIN_CORE0                   0x071
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0    0x072
+#define R2057_TXMIX5G_BOOST_TUNE_CORE0         0x073
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE0                0x074
+#define R2057_IPA5G_IAUX_CORE0                 0x075
+#define R2057_IPA5G_GAIN_CORE0                 0x076
+#define R2057_TSSI5G_SPARE1_CORE0              0x077
+#define R2057_TSSI5G_SPARE2_CORE0              0x078
+#define R2057_IPA5G_CASCOFFV_PU_CORE0          0x079
+#define R2057_IPA5G_PTAT_CORE0                 0x07a
+#define R2057_IPA5G_IMAIN_CORE0                        0x07b
+#define R2057_IPA5G_CASCONV_CORE0              0x07c
+#define R2057_IPA5G_BIAS_FILTER_CORE0          0x07d
+#define R2057_PAD_BIAS_FILTER_BWS_CORE0                0x080
+#define R2057_TR2G_CONFIG1_CORE0_NU            0x081
+#define R2057_TR2G_CONFIG2_CORE0_NU            0x082
+#define R2057_LNA5G_RFEN_CORE0                 0x083
+#define R2057_TR5G_CONFIG2_CORE0_NU            0x084
+#define R2057_RXRFBIAS_IBOOST_PU_CORE0         0x085
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0        0x086
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0     0x087
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0      0x088
+#define R2057_RXMIX_CMFBITAIL_PU_CORE0         0x089
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE0         0x08a
+#define R2057_LNA2_IAUX_PTAT_CORE0             0x08b
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE0         0x08c
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0    0x08d
+#define R2057_RXRFBIAS_BANDSEL_CORE0           0x08e
+#define R2057_TIA_CONFIG_CORE0                 0x08f
+#define R2057_TIA_IQGAIN_CORE0                 0x090
+#define R2057_TIA_IBIAS2_CORE0                 0x091
+#define R2057_TIA_IBIAS1_CORE0                 0x092
+#define R2057_TIA_SPARE_Q_CORE0                        0x093
+#define R2057_TIA_SPARE_I_CORE0                        0x094
+#define R2057_RXMIX2G_PUS_CORE0                        0x095
+#define R2057_RXMIX2G_VCMREFS_CORE0            0x096
+#define R2057_RXMIX2G_LODC_QI_CORE0            0x097
+#define R2057_W12G_BW_LNA2G_PUS_CORE0          0x098
+#define R2057_LNA2G_GAIN_CORE0                 0x099
+#define R2057_LNA2G_TUNE_CORE0                 0x09a
+#define R2057_RXMIX5G_PUS_CORE0                        0x09b
+#define R2057_RXMIX5G_VCMREFS_CORE0            0x09c
+#define R2057_RXMIX5G_LODC_QI_CORE0            0x09d
+#define R2057_W15G_BW_LNA5G_PUS_CORE0          0x09e
+#define R2057_LNA5G_GAIN_CORE0                 0x09f
+#define R2057_LNA5G_TUNE_CORE0                 0x0a0
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0       0x0a1
+#define R2057_RXBB_BIAS_MASTER_CORE0           0x0a2
+#define R2057_RXBB_VGABUF_IDACS_CORE0          0x0a3
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0    0x0a4
+#define R2057_TXBUF_VINCM_CORE0                        0x0a5
+#define R2057_TXBUF_IDACS_CORE0                        0x0a6
+#define R2057_LPF_RESP_RXBUF_BW_CORE0          0x0a7
+#define R2057_RXBB_CC_CORE0                    0x0a8
+#define R2057_RXBB_SPARE3_CORE0                        0x0a9
+#define R2057_RXBB_RCCAL_HPC_CORE0             0x0aa
+#define R2057_LPF_IDACS_CORE0                  0x0ab
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0     0x0ac
+#define R2057_TXBUF_GAIN_CORE0                 0x0ad
+#define R2057_AFELOOPBACK_AACI_RESP_CORE0      0x0ae
+#define R2057_RXBUF_DEGEN_CORE0                        0x0af
+#define R2057_RXBB_SPARE2_CORE0                        0x0b0
+#define R2057_RXBB_SPARE1_CORE0                        0x0b1
+#define R2057_RSSI_MASTER_CORE0                        0x0b2
+#define R2057_W2_MASTER_CORE0                  0x0b3
+#define R2057_NB_MASTER_CORE0                  0x0b4
+#define R2057_W2_IDACS0_Q_CORE0                        0x0b5
+#define R2057_W2_IDACS1_Q_CORE0                        0x0b6
+#define R2057_W2_IDACS0_I_CORE0                        0x0b7
+#define R2057_W2_IDACS1_I_CORE0                        0x0b8
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0     0x0b9
+#define R2057_NB_IDACS_Q_CORE0                 0x0ba
+#define R2057_NB_IDACS_I_CORE0                 0x0bb
+#define R2057_BACKUP4_CORE0                    0x0c1
+#define R2057_BACKUP3_CORE0                    0x0c2
+#define R2057_BACKUP2_CORE0                    0x0c3
+#define R2057_BACKUP1_CORE0                    0x0c4
+#define R2057_SPARE16_CORE0                    0x0c5
+#define R2057_SPARE15_CORE0                    0x0c6
+#define R2057_SPARE14_CORE0                    0x0c7
+#define R2057_SPARE13_CORE0                    0x0c8
+#define R2057_SPARE12_CORE0                    0x0c9
+#define R2057_SPARE11_CORE0                    0x0ca
+#define R2057_TX2G_BIAS_RESETS_CORE0           0x0cb
+#define R2057_TX5G_BIAS_RESETS_CORE0           0x0cc
+#define R2057_IQTEST_SEL_PU                    0x0cd
+#define R2057_XTAL_CONFIG2                     0x0ce
+#define R2057_BUFS_MISC_LPFBW_CORE0            0x0cf
+#define R2057_TXLPF_RCCAL_CORE0                        0x0d0
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0  0x0d1
+#define R2057_LPF_GAIN_CORE0                   0x0d2
+#define R2057_DACBUF_IDACS_BW_CORE0            0x0d3
+#define R2057_RXTXBIAS_CONFIG_CORE1            0x0d4
+#define R2057_TXGM_TXRF_PUS_CORE1              0x0d5
+#define R2057_TXGM_IDAC_BLEED_CORE1            0x0d6
+#define R2057_TXGM_GAIN_CORE1                  0x0db
+#define R2057_TXGM2G_PKDET_PUS_CORE1           0x0dc
+#define R2057_PAD2G_PTATS_CORE1                        0x0dd
+#define R2057_PAD2G_IDACS_CORE1                        0x0de
+#define R2057_PAD2G_BOOST_PU_CORE1             0x0df
+#define R2057_PAD2G_CASCV_GAIN_CORE1           0x0e0
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1      0x0e1
+#define R2057_TXMIX2G_LODC_CORE1               0x0e2
+#define R2057_PAD2G_TUNE_PUS_CORE1             0x0e3
+#define R2057_IPA2G_GAIN_CORE1                 0x0e4
+#define R2057_TSSI2G_SPARE1_CORE1              0x0e5
+#define R2057_TSSI2G_SPARE2_CORE1              0x0e6
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1     0x0e7
+#define R2057_IPA2G_IMAIN_CORE1                        0x0e8
+#define R2057_IPA2G_CASCONV_CORE1              0x0e9
+#define R2057_IPA2G_CASCOFFV_CORE1             0x0ea
+#define R2057_IPA2G_BIAS_FILTER_CORE1          0x0eb
+#define R2057_TX5G_PKDET_CORE1                 0x0ee
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE1         0x0ef
+#define R2057_PAD5G_PTATS1_CORE1               0x0f0
+#define R2057_PAD5G_CLASS_PTATS2_CORE1         0x0f1
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1                0x0f2
+#define R2057_PAD5G_CASCV_IMAIN_CORE1          0x0f3
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1    0x0f4
+#define R2057_PGA_BOOST_TUNE_CORE1             0x0f5
+#define R2057_PGA_GAIN_CORE1                   0x0f6
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1    0x0f7
+#define R2057_TXMIX5G_BOOST_TUNE_CORE1         0x0f8
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE1                0x0f9
+#define R2057_IPA5G_IAUX_CORE1                 0x0fa
+#define R2057_IPA5G_GAIN_CORE1                 0x0fb
+#define R2057_TSSI5G_SPARE1_CORE1              0x0fc
+#define R2057_TSSI5G_SPARE2_CORE1              0x0fd
+#define R2057_IPA5G_CASCOFFV_PU_CORE1          0x0fe
+#define R2057_IPA5G_PTAT_CORE1                 0x0ff
+#define R2057_IPA5G_IMAIN_CORE1                        0x100
+#define R2057_IPA5G_CASCONV_CORE1              0x101
+#define R2057_IPA5G_BIAS_FILTER_CORE1          0x102
+#define R2057_PAD_BIAS_FILTER_BWS_CORE1                0x105
+#define R2057_TR2G_CONFIG1_CORE1_NU            0x106
+#define R2057_TR2G_CONFIG2_CORE1_NU            0x107
+#define R2057_LNA5G_RFEN_CORE1                 0x108
+#define R2057_TR5G_CONFIG2_CORE1_NU            0x109
+#define R2057_RXRFBIAS_IBOOST_PU_CORE1         0x10a
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1        0x10b
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1     0x10c
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1      0x10d
+#define R2057_RXMIX_CMFBITAIL_PU_CORE1         0x10e
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE1         0x10f
+#define R2057_LNA2_IAUX_PTAT_CORE1             0x110
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE1         0x111
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1    0x112
+#define R2057_RXRFBIAS_BANDSEL_CORE1           0x113
+#define R2057_TIA_CONFIG_CORE1                 0x114
+#define R2057_TIA_IQGAIN_CORE1                 0x115
+#define R2057_TIA_IBIAS2_CORE1                 0x116
+#define R2057_TIA_IBIAS1_CORE1                 0x117
+#define R2057_TIA_SPARE_Q_CORE1                        0x118
+#define R2057_TIA_SPARE_I_CORE1                        0x119
+#define R2057_RXMIX2G_PUS_CORE1                        0x11a
+#define R2057_RXMIX2G_VCMREFS_CORE1            0x11b
+#define R2057_RXMIX2G_LODC_QI_CORE1            0x11c
+#define R2057_W12G_BW_LNA2G_PUS_CORE1          0x11d
+#define R2057_LNA2G_GAIN_CORE1                 0x11e
+#define R2057_LNA2G_TUNE_CORE1                 0x11f
+#define R2057_RXMIX5G_PUS_CORE1                        0x120
+#define R2057_RXMIX5G_VCMREFS_CORE1            0x121
+#define R2057_RXMIX5G_LODC_QI_CORE1            0x122
+#define R2057_W15G_BW_LNA5G_PUS_CORE1          0x123
+#define R2057_LNA5G_GAIN_CORE1                 0x124
+#define R2057_LNA5G_TUNE_CORE1                 0x125
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1       0x126
+#define R2057_RXBB_BIAS_MASTER_CORE1           0x127
+#define R2057_RXBB_VGABUF_IDACS_CORE1          0x128
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1    0x129
+#define R2057_TXBUF_VINCM_CORE1                        0x12a
+#define R2057_TXBUF_IDACS_CORE1                        0x12b
+#define R2057_LPF_RESP_RXBUF_BW_CORE1          0x12c
+#define R2057_RXBB_CC_CORE1                    0x12d
+#define R2057_RXBB_SPARE3_CORE1                        0x12e
+#define R2057_RXBB_RCCAL_HPC_CORE1             0x12f
+#define R2057_LPF_IDACS_CORE1                  0x130
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1     0x131
+#define R2057_TXBUF_GAIN_CORE1                 0x132
+#define R2057_AFELOOPBACK_AACI_RESP_CORE1      0x133
+#define R2057_RXBUF_DEGEN_CORE1                        0x134
+#define R2057_RXBB_SPARE2_CORE1                        0x135
+#define R2057_RXBB_SPARE1_CORE1                        0x136
+#define R2057_RSSI_MASTER_CORE1                        0x137
+#define R2057_W2_MASTER_CORE1                  0x138
+#define R2057_NB_MASTER_CORE1                  0x139
+#define R2057_W2_IDACS0_Q_CORE1                        0x13a
+#define R2057_W2_IDACS1_Q_CORE1                        0x13b
+#define R2057_W2_IDACS0_I_CORE1                        0x13c
+#define R2057_W2_IDACS1_I_CORE1                        0x13d
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1     0x13e
+#define R2057_NB_IDACS_Q_CORE1                 0x13f
+#define R2057_NB_IDACS_I_CORE1                 0x140
+#define R2057_BACKUP4_CORE1                    0x146
+#define R2057_BACKUP3_CORE1                    0x147
+#define R2057_BACKUP2_CORE1                    0x148
+#define R2057_BACKUP1_CORE1                    0x149
+#define R2057_SPARE16_CORE1                    0x14a
+#define R2057_SPARE15_CORE1                    0x14b
+#define R2057_SPARE14_CORE1                    0x14c
+#define R2057_SPARE13_CORE1                    0x14d
+#define R2057_SPARE12_CORE1                    0x14e
+#define R2057_SPARE11_CORE1                    0x14f
+#define R2057_TX2G_BIAS_RESETS_CORE1           0x150
+#define R2057_TX5G_BIAS_RESETS_CORE1           0x151
+#define R2057_SPARE8_CORE1                     0x152
+#define R2057_SPARE7_CORE1                     0x153
+#define R2057_BUFS_MISC_LPFBW_CORE1            0x154
+#define R2057_TXLPF_RCCAL_CORE1                        0x155
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1  0x156
+#define R2057_LPF_GAIN_CORE1                   0x157
+#define R2057_DACBUF_IDACS_BW_CORE1            0x158
+#define R2057_DACBUF_VINCM_CORE1               0x159
+#define R2057_RCCAL_START_R1_Q1_P1             0x15a
+#define R2057_RCCAL_X1                         0x15b
+#define R2057_RCCAL_TRC0                       0x15c
+#define R2057_RCCAL_TRC1                       0x15d
+#define R2057_RCCAL_DONE_OSCCAP                        0x15e
+#define R2057_RCCAL_N0_0                       0x15f
+#define R2057_RCCAL_N0_1                       0x160
+#define R2057_RCCAL_N1_0                       0x161
+#define R2057_RCCAL_N1_1                       0x162
+#define R2057_RCAL_STATUS                      0x163
+#define R2057_XTALPUOVR_PINCTRL                        0x164
+#define R2057_OVR_REG0                         0x165
+#define R2057_OVR_REG1                         0x166
+#define R2057_OVR_REG2                         0x167
+#define R2057_OVR_REG3                         0x168
+#define R2057_OVR_REG4                         0x169
+#define R2057_RCCAL_SCAP_VAL                   0x16a
+#define R2057_RCCAL_BCAP_VAL                   0x16b
+#define R2057_RCCAL_HPC_VAL                    0x16c
+#define R2057_RCCAL_OVERRIDES                  0x16d
+#define R2057_TX0_IQCAL_GAIN_BW                        0x170
+#define R2057_TX0_LOFT_FINE_I                  0x171
+#define R2057_TX0_LOFT_FINE_Q                  0x172
+#define R2057_TX0_LOFT_COARSE_I                        0x173
+#define R2057_TX0_LOFT_COARSE_Q                        0x174
+#define R2057_TX0_TX_SSI_MASTER                        0x175
+#define R2057_TX0_IQCAL_VCM_HG                 0x176
+#define R2057_TX0_IQCAL_IDAC                   0x177
+#define R2057_TX0_TSSI_VCM                     0x178
+#define R2057_TX0_TX_SSI_MUX                   0x179
+#define R2057_TX0_TSSIA                                0x17a
+#define R2057_TX0_TSSIG                                0x17b
+#define R2057_TX0_TSSI_MISC1                   0x17c
+#define R2057_TX0_TXRXCOUPLE_2G_ATTEN          0x17d
+#define R2057_TX0_TXRXCOUPLE_2G_PWRUP          0x17e
+#define R2057_TX0_TXRXCOUPLE_5G_ATTEN          0x17f
+#define R2057_TX0_TXRXCOUPLE_5G_PWRUP          0x180
+#define R2057_TX1_IQCAL_GAIN_BW                        0x190
+#define R2057_TX1_LOFT_FINE_I                  0x191
+#define R2057_TX1_LOFT_FINE_Q                  0x192
+#define R2057_TX1_LOFT_COARSE_I                        0x193
+#define R2057_TX1_LOFT_COARSE_Q                        0x194
+#define R2057_TX1_TX_SSI_MASTER                        0x195
+#define R2057_TX1_IQCAL_VCM_HG                 0x196
+#define R2057_TX1_IQCAL_IDAC                   0x197
+#define R2057_TX1_TSSI_VCM                     0x198
+#define R2057_TX1_TX_SSI_MUX                   0x199
+#define R2057_TX1_TSSIA                                0x19a
+#define R2057_TX1_TSSIG                                0x19b
+#define R2057_TX1_TSSI_MISC1                   0x19c
+#define R2057_TX1_TXRXCOUPLE_2G_ATTEN          0x19d
+#define R2057_TX1_TXRXCOUPLE_2G_PWRUP          0x19e
+#define R2057_TX1_TXRXCOUPLE_5G_ATTEN          0x19f
+#define R2057_TX1_TXRXCOUPLE_5G_PWRUP          0x1a0
+#define R2057_AFE_VCM_CAL_MASTER_CORE0         0x1a1
+#define R2057_AFE_SET_VCM_I_CORE0              0x1a2
+#define R2057_AFE_SET_VCM_Q_CORE0              0x1a3
+#define R2057_AFE_STATUS_VCM_IQADC_CORE0       0x1a4
+#define R2057_AFE_STATUS_VCM_I_CORE0           0x1a5
+#define R2057_AFE_STATUS_VCM_Q_CORE0           0x1a6
+#define R2057_AFE_VCM_CAL_MASTER_CORE1         0x1a7
+#define R2057_AFE_SET_VCM_I_CORE1              0x1a8
+#define R2057_AFE_SET_VCM_Q_CORE1              0x1a9
+#define R2057_AFE_STATUS_VCM_IQADC_CORE1       0x1aa
+#define R2057_AFE_STATUS_VCM_I_CORE1           0x1ab
+#define R2057_AFE_STATUS_VCM_Q_CORE1           0x1ac
+
+#define R2057v7_DACBUF_VINCM_CORE0             0x1ad
+#define R2057v7_RCCAL_MASTER                   0x1ae
+#define R2057v7_TR2G_CONFIG3_CORE0_NU          0x1af
+#define R2057v7_TR2G_CONFIG3_CORE1_NU          0x1b0
+#define R2057v7_LOGEN_PUS1                     0x1b1
+#define R2057v7_OVR_REG5                       0x1b2
+#define R2057v7_OVR_REG6                       0x1b3
+#define R2057v7_OVR_REG7                       0x1b4
+#define R2057v7_OVR_REG8                       0x1b5
+#define R2057v7_OVR_REG9                       0x1b6
+#define R2057v7_OVR_REG10                      0x1b7
+#define R2057v7_OVR_REG11                      0x1b8
+#define R2057v7_OVR_REG12                      0x1b9
+#define R2057v7_OVR_REG13                      0x1ba
+#define R2057v7_OVR_REG14                      0x1bb
+#define R2057v7_OVR_REG15                      0x1bc
+#define R2057v7_OVR_REG16                      0x1bd
+#define R2057v7_OVR_REG17                      0x1be
+#define R2057v7_OVR_REG18                      0x1bf
+#define R2057v7_OVR_REG19                      0x1c0
+#define R2057v7_OVR_REG20                      0x1c1
+#define R2057v7_OVR_REG21                      0x1c2
+#define R2057v7_OVR_REG22                      0x1c3
+#define R2057v7_OVR_REG23                      0x1c4
+#define R2057v7_OVR_REG24                      0x1c5
+#define R2057v7_OVR_REG25                      0x1c6
+#define R2057v7_OVR_REG26                      0x1c7
+#define R2057v7_OVR_REG27                      0x1c8
+#define R2057v7_OVR_REG28                      0x1c9
+#define R2057v7_IQTEST_SEL_PU2                 0x1ca
+
+#define R2057_VCM_MASK                         0x7
+
+void r2057_upload_inittabs(struct b43_wldev *dev);
+
+#endif /* B43_RADIO_2057_H_ */
index f0d8377..97d4e27 100644 (file)
@@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
        { 0x00C0,  6, 0xE7, 0xF9, 0xEC, 0xFB }  /* field == 0x4000 (fls 15) */
 };
 
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over0[] = {
+       { 0x0004, 0x07A, 0x07D, 0x0002, 1 },
+       { 0x0008, 0x07A, 0x07D, 0x0004, 2 },
+       { 0x0010, 0x07A, 0x07D, 0x0010, 4 },
+       { 0x0020, 0x07A, 0x07D, 0x0020, 5 },
+       { 0x0040, 0x07A, 0x07D, 0x0040, 6 },
+       { 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
+       { 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
+       { 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
+       { 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
+       { 0x6000, 0x348, 0x349, 0xFFFF, 0 },
+       { 0x2000, 0x348, 0x349, 0x000F, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over1[] = {
+       { 0x0002, 0x340, 0x341, 0x0002, 1 },
+       { 0x0008, 0x340, 0x341, 0x0008, 3 },
+       { 0x0020, 0x340, 0x341, 0x0020, 5 },
+       { 0x0010, 0x340, 0x341, 0x0010, 4 },
+       { 0x0004, 0x340, 0x341, 0x0004, 2 },
+       { 0x0080, 0x340, 0x341, 0x0700, 8 },
+       { 0x0800, 0x340, 0x341, 0x4000, 14 },
+       { 0x0400, 0x340, 0x341, 0x2000, 13 },
+       { 0x0200, 0x340, 0x341, 0x0800, 12 },
+       { 0x0100, 0x340, 0x341, 0x0100, 11 },
+       { 0x0040, 0x340, 0x341, 0x0040, 6 },
+       { 0x0001, 0x340, 0x341, 0x0001, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over2[] = {
+       { 0x0008, 0x344, 0x345, 0x0008, 3 },
+       { 0x0002, 0x344, 0x345, 0x0002, 1 },
+       { 0x0001, 0x344, 0x345, 0x0001, 0 },
+       { 0x0004, 0x344, 0x345, 0x0004, 2 },
+       { 0x0010, 0x344, 0x345, 0x0010, 4 },
+};
+
 struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
        { 10, 14, 19, 27 },
        { -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 
        return e;
 }
+
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+       struct b43_wldev *dev, u16 field, u8 override)
+{
+       const struct nphy_rf_control_override_rev7 *e;
+       u8 size, i;
+
+       switch (override) {
+       case 0:
+               e = tbl_rf_control_override_rev7_over0;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
+               break;
+       case 1:
+               e = tbl_rf_control_override_rev7_over1;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
+               break;
+       case 2:
+               e = tbl_rf_control_override_rev7_over2;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
+               break;
+       default:
+               b43err(dev->wl, "Invalid override value %d\n", override);
+               return NULL;
+       }
+
+       for (i = 0; i < size; i++) {
+               if (e[i].field == field)
+                       return &e[i];
+       }
+
+       return NULL;
+}
index f348953..c600700 100644 (file)
@@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 {
        u8 val_addr1;
 };
 
+struct nphy_rf_control_override_rev7 {
+       u16 field;
+       u16 val_addr_core0;
+       u16 val_addr_core1;
+       u16 val_mask;
+       u8 val_shift;
+};
+
 struct nphy_gain_ctl_workaround_entry {
        s8 lna1_gain[4];
        s8 lna2_gain[4];
@@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2
        tbl_rf_control_override_rev2[];
 extern const struct nphy_rf_control_override_rev3
        tbl_rf_control_override_rev3[];
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+       struct b43_wldev *dev, u16 field, u8 override);
 
 #endif /* B43_TABLES_NPHY_H_ */
index 8156135..291cdf6 100644 (file)
@@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
                return 0;
        ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
                    (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
-                    & mask) | set);
+                    & ~mask) | set);
 
        return 0;
 }
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
 }
 
 static void b43legacy_op_tx(struct ieee80211_hw *hw,
+                           struct ieee80211_tx_control *control,
                            struct sk_buff *skb)
 {
        struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
index a5edebe..718da8d 100644 (file)
@@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
-
+/* This needs to be adjusted when brcms_firmwares changes */
+MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
+MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
 
 /* recognized BCMA Core IDs */
 static struct bcma_device_id brcms_coreid_table[] = {
@@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
        }
 }
 
-static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void brcms_ops_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct brcms_info *wl = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto done;
        }
        brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
-       tx_info->rate_driver_data[0] = tx_info->control.sta;
+       tx_info->rate_driver_data[0] = control->sta;
  done:
        spin_unlock_bh(&wl->lock);
 }
index 03ca653..75086b3 100644 (file)
@@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
 
        channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
-       if (channel > 14) {
-               rx_status->band = IEEE80211_BAND_5GHZ;
-               rx_status->freq = ieee80211_ofdm_chan_to_freq(
-                                       WF_CHAN_FACTOR_5_G/2, channel);
-
-       } else {
-               rx_status->band = IEEE80211_BAND_2GHZ;
-               rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
-       }
+       rx_status->band =
+               channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+       rx_status->freq =
+               ieee80211_channel_to_frequency(channel, rx_status->band);
 
        rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
 
index f10d302..c11a290 100644 (file)
 #define WL_CHANSPEC_BAND_2G            0x2000
 #define INVCHANSPEC                    255
 
-/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
-#define WF_CHAN_FACTOR_2_4_G           4814    /* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G             10000   /* 5   GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G             8000    /* 4.9 GHz band for Japan */
-
 #define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
 #define CHSPEC_BAND(chspec)    ((chspec) & WL_CHANSPEC_BAND_MASK)
 
index faec404..e252acb 100644 (file)
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
  * start C_TX command process
  */
 static int
-il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il3945_tx_skb(struct il_priv *il,
+             struct ieee80211_sta *sta,
+             struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
        hdr_len = ieee80211_hdrlen(fc);
 
        /* Find idx into station table for destination station */
-       sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+       sta_id = il_sta_id_or_broadcast(il, sta);
        if (sta_id == IL_INVALID_STATION) {
                D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
                goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
 }
 
 static void
-il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il3945_mac_tx(struct ieee80211_hw *hw,
+              struct ieee80211_tx_control *control,
+              struct sk_buff *skb)
 {
        struct il_priv *il = hw->priv;
 
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
             ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (il3945_tx_skb(il, skb))
+       if (il3945_tx_skb(il, control->sta, skb))
                dev_kfree_skb_any(skb);
 
        D_MAC80211("leave\n");
index 34f61a0..eac4dc8 100644 (file)
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
 }
 
 static void
-il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
-                        struct ieee80211_tx_info *info, __le16 fc)
+il4965_tx_cmd_build_rate(struct il_priv *il,
+                        struct il_tx_cmd *tx_cmd,
+                        struct ieee80211_tx_info *info,
+                        struct ieee80211_sta *sta,
+                        __le16 fc)
 {
        const u8 rts_retry_limit = 60;
        u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
        rate_idx = info->control.rates[0].idx;
        if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
            || rate_idx > RATE_COUNT_LEGACY)
-               rate_idx =
-                   rate_lowest_index(&il->bands[info->band],
-                                     info->control.sta);
+               rate_idx = rate_lowest_index(&il->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
  * start C_TX command process
  */
 int
-il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il4965_tx_skb(struct il_priv *il,
+             struct ieee80211_sta *sta,
+             struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct il_station_priv *sta_priv = NULL;
        struct il_tx_queue *txq;
        struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
                sta_id = il->hw_params.bcast_id;
        else {
                /* Find idx into station table for destination station */
-               sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+               sta_id = il_sta_id_or_broadcast(il, sta);
 
                if (sta_id == IL_INVALID_STATION) {
                        D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
        /* TODO need this for burst mode later on */
        il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
 
-       il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+       il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
 
        il_update_stats(il, true, fc, len);
        /*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
 }
 
 void
-il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il4965_mac_tx(struct ieee80211_hw *hw,
+             struct ieee80211_tx_control *control,
+             struct sk_buff *skb)
 {
        struct il_priv *il = hw->priv;
 
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
             ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (il4965_tx_skb(il, skb))
+       if (il4965_tx_skb(il, control->sta, skb))
                dev_kfree_skb_any(skb);
 
        D_MACDUMP("leave\n");
index 1db6776..2d092f3 100644 (file)
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
 void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
                                 struct ieee80211_tx_info *info);
-int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_skb(struct il_priv *il,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb);
 int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 * ssn);
 int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
 int il4965_eeprom_check_version(struct il_priv *il);
 
 /* mac80211 handlers (for 4965) */
-void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void il4965_mac_tx(struct ieee80211_hw *hw,
+                  struct ieee80211_tx_control *control,
+                  struct sk_buff *skb);
 int il4965_mac_start(struct ieee80211_hw *hw);
 void il4965_mac_stop(struct ieee80211_hw *hw);
 void il4965_configure_filter(struct ieee80211_hw *hw,
index 0370403..eb99875 100644 (file)
@@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
 
 #ifdef CONFIG_PM
 
-int
+static int
 il_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(il_pci_suspend);
 
-int
+static int
 il_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(il_pci_resume);
 
-const struct dev_pm_ops il_pm_ops = {
-       .suspend = il_pci_suspend,
-       .resume = il_pci_resume,
-       .freeze = il_pci_suspend,
-       .thaw = il_pci_resume,
-       .poweroff = il_pci_suspend,
-       .restore = il_pci_resume,
-};
+SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
 EXPORT_SYMBOL(il_pm_ops);
 
 #endif /* CONFIG_PM */
index 5f50177..3d3135e 100644 (file)
@@ -1845,8 +1845,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
                          u32 beacon_interval);
 
 #ifdef CONFIG_PM
-int il_pci_suspend(struct device *device);
-int il_pci_resume(struct device *device);
 extern const struct dev_pm_ops il_pm_ops;
 
 #define IL_LEGACY_PM_OPS       (&il_pm_ops)
index 9bb16bd..75e12f2 100644 (file)
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
 
 /* tx */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_skb(struct iwl_priv *priv,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb);
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
 #else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+                                    struct dentry *dbgfs_dir)
 {
        return 0;
 }
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 #ifdef CONFIG_IWLWIFI_DEBUG
index a47b306..1a98fa3 100644 (file)
@@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
  * Create the debugfs files and directories
  *
  */
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
 {
-       struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+       struct dentry *dir_data, *dir_rf, *dir_debug;
 
-       dir_drv = debugfs_create_dir(name, phyd);
-       if (!dir_drv)
-               return -ENOMEM;
-
-       priv->debugfs_dir = dir_drv;
+       priv->debugfs_dir = dbgfs_dir;
 
-       dir_data = debugfs_create_dir("data", dir_drv);
+       dir_data = debugfs_create_dir("data", dbgfs_dir);
        if (!dir_data)
                goto err;
-       dir_rf = debugfs_create_dir("rf", dir_drv);
+       dir_rf = debugfs_create_dir("rf", dbgfs_dir);
        if (!dir_rf)
                goto err;
-       dir_debug = debugfs_create_dir("debug", dir_drv);
+       dir_debug = debugfs_create_dir("debug", dbgfs_dir);
        if (!dir_debug)
                goto err;
 
@@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        /* Calibrations disabled/enabled status*/
        DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
 
-       if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
-               goto err;
+       /*
+        * Create a symlink with mac80211. This is not very robust, as it does
+        * not remove the symlink created. The implicit assumption is that
+        * when the opmode exits, mac80211 will also exit, and will remove
+        * this symlink as part of its cleanup.
+        */
+       if (priv->mac80211_registered) {
+               char buf[100];
+               struct dentry *mac80211_dir, *dev_dir, *root_dir;
+
+               dev_dir = dbgfs_dir->d_parent;
+               root_dir = dev_dir->d_parent;
+               mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+               snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
+                        dev_dir->d_name.name);
+
+               if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+                       goto err;
+       }
+
        return 0;
 
 err:
-       IWL_ERR(priv, "Can't create the debugfs directory\n");
-       iwl_dbgfs_unregister(priv);
+       IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
        return -ENOMEM;
 }
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-       if (!priv->debugfs_dir)
-               return;
-
-       debugfs_remove_recursive(priv->debugfs_dir);
-       priv->debugfs_dir = NULL;
-}
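
An aside on the symlink hunk above (a user-space sketch with hypothetical directory names, not part of the patch): the target string is a relative path that climbs two levels out of mac80211's per-phy debugfs directory and back down into the driver's per-device directory, e.g. '../../iwlwifi/0000:02:00.0'.

#include <stdio.h>

int main(void)
{
    const char *root_name = "iwlwifi";      /* hypothetical driver debugfs root */
    const char *dev_name  = "0000:02:00.0"; /* hypothetical per-device directory */
    char buf[100];

    /* Relative target, resolved from inside mac80211's per-phy directory. */
    snprintf(buf, sizeof(buf), "../../%s/%s", root_name, dev_name);
    printf("symlink 'iwlwifi' -> %s\n", buf);
    return 0;
}
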
index a5f7bce..ff8162d 100644 (file)
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                        ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
        }
 
-       hw->wiphy->max_remain_on_channel_duration = 1000;
+       hw->wiphy->max_remain_on_channel_duration = 500;
 
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 }
 #endif
 
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+                         struct ieee80211_tx_control *control,
+                         struct sk_buff *skb)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
        IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
                     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (iwlagn_tx_skb(priv, skb))
+       if (iwlagn_tx_skb(priv, control->sta, skb))
                dev_kfree_skb_any(skb);
 }
 
index 84d3db5..7ff3f14 100644 (file)
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
         * No race since we hold the mutex here and a new one
         * can't come in at this time.
         */
-       ieee80211_remain_on_channel_expired(priv->hw);
+       if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
+               ieee80211_remain_on_channel_expired(priv->hw);
 
        exit_pending =
                test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
                iwlagn_prepare_restart(priv);
                mutex_unlock(&priv->mutex);
                iwl_cancel_deferred_work(priv);
-               ieee80211_restart_hw(priv->hw);
+               if (priv->mac80211_registered)
+                       ieee80211_restart_hw(priv->hw);
+               else
+                       IWL_ERR(priv,
+                               "Cannot request restart before registering with mac80211");
        } else {
                WARN_ON(1);
        }
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
                                                 const struct iwl_cfg *cfg,
-                                                const struct iwl_fw *fw)
+                                                const struct iwl_fw *fw,
+                                                struct dentry *dbgfs_dir)
 {
        struct iwl_priv *priv;
        struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
                goto out_destroy_workqueue;
 
-       if (iwl_dbgfs_register(priv, DRV_NAME))
-               IWL_ERR(priv,
-                       "failed to create debugfs files. Ignoring error\n");
+       if (iwl_dbgfs_register(priv, dbgfs_dir))
+               goto out_mac80211_unregister;
 
        return op_mode;
 
+out_mac80211_unregister:
+       iwlagn_mac_unregister(priv);
 out_destroy_workqueue:
+       iwl_tt_exit(priv);
+       iwl_testmode_free(priv);
+       iwl_cancel_deferred_work(priv);
        destroy_workqueue(priv->workqueue);
        priv->workqueue = NULL;
        iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 
        IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
 
-       iwl_dbgfs_unregister(priv);
-
        iwl_testmode_free(priv);
        iwlagn_mac_unregister(priv);
 
index b29b798..fe36a38 100644 (file)
@@ -150,7 +150,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
        if (!(flags & CMD_ASYNC)) {
-               cmd.flags |= CMD_WANT_SKB;
+               cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
                might_sleep();
        }
 
index 5971a23..f5ca73a 100644 (file)
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                                     struct iwl_tx_cmd *tx_cmd,
                                     struct ieee80211_tx_info *info,
+                                    struct ieee80211_sta *sta,
                                     __le16 fc)
 {
        u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
                        (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
                rate_idx = rate_lowest_index(
-                               &priv->eeprom_data->bands[info->band],
-                               info->control.sta);
+                               &priv->eeprom_data->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
 /*
  * start REPLY_TX command process
  */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+int iwlagn_tx_skb(struct iwl_priv *priv,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                sta_id = ctx->bcast_sta_id;
        else {
                /* Find index into station table for destination station */
-               sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
+               sta_id = iwl_sta_id_or_broadcast(ctx, sta);
                if (sta_id == IWL_INVALID_STATION) {
                        IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                                       hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-       if (info->control.sta)
-               sta_priv = (void *)info->control.sta->drv_priv;
+       if (sta)
+               sta_priv = (void *)sta->drv_priv;
 
        if (sta_priv && sta_priv->asleep &&
            (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        /* TODO need this for burst mode later on */
        iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 
-       iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+       iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
 
        memset(&info->status, 0, sizeof(info->status));
 
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                 * only. Check this here.
                 */
                if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
-                   tid_data->agg.state != IWL_AGG_OFF,
+                             tid_data->agg.state != IWL_AGG_OFF,
                    "Tx while agg.state = %d", tid_data->agg.state))
                        goto drop_unlock_sta;
 
index cc41cfa..48d6d44 100644 (file)
@@ -101,6 +101,10 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
 /**
  * struct iwl_drv - drv common data
  * @list: list of drv structures using this opmode
@@ -126,6 +130,12 @@ struct iwl_drv {
        char firmware_name[25];         /* name of firmware file to load */
 
        struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct dentry *dbgfs_drv;
+       struct dentry *dbgfs_trans;
+       struct dentry *dbgfs_op_mode;
+#endif
 };
 
 #define DVM_OP_MODE    0
@@ -194,7 +204,8 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
        return 0;
 }
 
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+                               void *context);
 
 #define UCODE_EXPERIMENTAL_INDEX       100
 #define UCODE_EXPERIMENTAL_TAG         "exp"
@@ -231,7 +242,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 
        return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
                                       drv->trans->dev,
-                                      GFP_KERNEL, drv, iwl_ucode_callback);
+                                      GFP_KERNEL, drv, iwl_req_fw_callback);
 }
 
 struct fw_img_parsing {
@@ -759,13 +770,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
        return 0;
 }
 
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+       const struct iwl_op_mode_ops *ops = op->ops;
+       struct dentry *dbgfs_dir = NULL;
+       struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                                               drv->dbgfs_drv);
+       if (!drv->dbgfs_op_mode) {
+               IWL_ERR(drv,
+                       "failed to create opmode debugfs directory\n");
+               return op_mode;
+       }
+       dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+       op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (!op_mode) {
+               debugfs_remove_recursive(drv->dbgfs_op_mode);
+               drv->dbgfs_op_mode = NULL;
+       }
+#endif
+
+       return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+       /* op_mode can be NULL if its start failed */
+       if (drv->op_mode) {
+               iwl_op_mode_stop(drv->op_mode);
+               drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               debugfs_remove_recursive(drv->dbgfs_op_mode);
+               drv->dbgfs_op_mode = NULL;
+#endif
+       }
+}
+
 /**
- * iwl_ucode_callback - callback when firmware was loaded
+ * iwl_req_fw_callback - callback when firmware was loaded
  *
  * If loaded successfully, copies the firmware into buffers
  * for the card to fetch (via DMA).
  */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 {
        struct iwl_drv *drv = context;
        struct iwl_fw *fw = &drv->fw;
@@ -908,8 +963,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
        list_add_tail(&drv->list, &op->drv);
 
        if (op->ops) {
-               const struct iwl_op_mode_ops *ops = op->ops;
-               drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+               drv->op_mode = _iwl_op_mode_start(drv, op);
 
                if (!drv->op_mode) {
                        mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,24 +1023,51 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
        init_completion(&drv->request_firmware_complete);
        INIT_LIST_HEAD(&drv->list);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* Create the device debugfs entries. */
+       drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+                                           iwl_dbgfs_root);
+
+       if (!drv->dbgfs_drv) {
+               IWL_ERR(drv, "failed to create debugfs directory\n");
+               goto err_free_drv;
+       }
+
+       /* Create transport layer debugfs dir */
+       drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+       if (!drv->trans->dbgfs_dir) {
+               IWL_ERR(drv, "failed to create transport debugfs directory\n");
+               goto err_free_dbgfs;
+       }
+#endif
+
        ret = iwl_request_firmware(drv, true);
 
        if (ret) {
                IWL_ERR(trans, "Couldn't request the fw\n");
-               kfree(drv);
-               drv = NULL;
+               goto err_fw;
        }
 
        return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+       debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+       kfree(drv);
+       drv = NULL;
+
+       return drv;
 }
 
 void iwl_drv_stop(struct iwl_drv *drv)
 {
        wait_for_completion(&drv->request_firmware_complete);
 
-       /* op_mode can be NULL if its start failed */
-       if (drv->op_mode)
-               iwl_op_mode_stop(drv->op_mode);
+       _iwl_op_mode_stop(drv);
 
        iwl_dealloc_ucode(drv);
 
@@ -1000,6 +1081,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
                list_del(&drv->list);
        mutex_unlock(&iwlwifi_opmode_table_mtx);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
        kfree(drv);
 }
 
@@ -1022,15 +1107,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
 {
        int i;
        struct iwl_drv *drv;
+       struct iwlwifi_opmode_table *op;
 
        mutex_lock(&iwlwifi_opmode_table_mtx);
        for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
-               if (strcmp(iwlwifi_opmode_table[i].name, name))
+               op = &iwlwifi_opmode_table[i];
+               if (strcmp(op->name, name))
                        continue;
-               iwlwifi_opmode_table[i].ops = ops;
-               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
-                       drv->op_mode = ops->start(drv->trans, drv->cfg,
-                                                 &drv->fw);
+               op->ops = ops;
+               /* TODO: need to handle exceptional case */
+               list_for_each_entry(drv, &op->drv, list)
+                       drv->op_mode = _iwl_op_mode_start(drv, op);
+
                mutex_unlock(&iwlwifi_opmode_table_mtx);
                return 0;
        }
@@ -1051,12 +1139,9 @@ void iwl_opmode_deregister(const char *name)
                iwlwifi_opmode_table[i].ops = NULL;
 
                /* call the stop routine for all devices */
-               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
-                       if (drv->op_mode) {
-                               iwl_op_mode_stop(drv->op_mode);
-                               drv->op_mode = NULL;
-                       }
-               }
+               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+                       _iwl_op_mode_stop(drv);
+
                mutex_unlock(&iwlwifi_opmode_table_mtx);
                return;
        }
@@ -1076,6 +1161,14 @@ static int __init iwl_drv_init(void)
        pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
        pr_info(DRV_COPYRIGHT "\n");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* Create the root of iwlwifi debugfs subsystem. */
+       iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+       if (!iwl_dbgfs_root)
+               return -EFAULT;
+#endif
+
        return iwl_pci_register_driver();
 }
 module_init(iwl_drv_init);
@@ -1083,6 +1176,10 @@ module_init(iwl_drv_init);
 static void __exit iwl_drv_exit(void)
 {
        iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
 }
 module_exit(iwl_drv_exit);
 
index 2cbf137..285de5f 100644 (file)
@@ -90,9 +90,9 @@
  * 4) The bus specific component configures the bus
  * 5) The bus specific component calls to the drv bus agnostic part
  *    (iwl_drv_start)
- * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback
- * 7) iwl_ucode_callback parses the fw file
- * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
+ * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation to match the fw
  */
 
 struct iwl_drv;
index 64886f9..c8d9b95 100644 (file)
@@ -134,7 +134,8 @@ struct iwl_cfg;
 struct iwl_op_mode_ops {
        struct iwl_op_mode *(*start)(struct iwl_trans *trans,
                                     const struct iwl_cfg *cfg,
-                                    const struct iwl_fw *fw);
+                                    const struct iwl_fw *fw,
+                                    struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
        int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
index 92576a3..ff11542 100644 (file)
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
  * @CMD_SYNC: The caller will be stalled until the fw responds to the command
 * @CMD_ASYNC: Return right away and don't wait for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *     response.
+ *     response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
+ *     response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
+ *     copied. The pointer passed to the response handler remains owned by the
+ *     transport and doesn't need to be freed by the op_mode. This also means
+ *     that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
        CMD_SYNC = 0,
        CMD_ASYNC = BIT(0),
        CMD_WANT_SKB = BIT(1),
-       CMD_ON_DEMAND = BIT(2),
+       CMD_WANT_HCMD = BIT(2),
+       CMD_ON_DEMAND = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
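
A hedged usage sketch, not the real transport API: CMD_WANT_HCMD is a bit flag that callers OR into the existing command flags, as the iwl_send_add_sta hunk earlier in this diff does with 'cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD'. The snippet below only mirrors the flag values from the hunk above with local definitions, to show how such flags combine and are tested.

#include <stdio.h>

#define BIT(n) (1u << (n))

enum cmd_mode {
    CMD_SYNC      = 0,
    CMD_ASYNC     = BIT(0),
    CMD_WANT_SKB  = BIT(1),
    CMD_WANT_HCMD = BIT(2),
    CMD_ON_DEMAND = BIT(3),
};

int main(void)
{
    unsigned int flags = CMD_SYNC;

    /* Synchronous command that wants both the response buffer and a copy
     * of the host command back in its response handler. */
    flags |= CMD_WANT_SKB | CMD_WANT_HCMD;

    if (!(flags & CMD_ASYNC) && (flags & CMD_WANT_HCMD))
        printf("handler will also receive a copy of the sent HCMD\n");
    return 0;
}
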
@@ -460,6 +466,8 @@ struct iwl_trans {
        size_t dev_cmd_headroom;
        char dev_cmd_pool_name[50];
 
+       struct dentry *dbgfs_dir;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
index f4c3500..89bfb43 100644 (file)
@@ -282,8 +282,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!trans_pcie->drv)
                goto out_free_trans;
 
+       /* register transport layer debugfs here */
+       if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+               goto out_free_drv;
+
        return 0;
 
+out_free_drv:
+       iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
        iwl_trans_pcie_free(iwl_trans);
        pci_set_drvdata(pdev, NULL);
index 4ffc18d..71c7994 100644 (file)
@@ -184,6 +184,7 @@ struct iwl_queue {
 
 struct iwl_pcie_tx_queue_entry {
        struct iwl_device_cmd *cmd;
+       struct iwl_device_cmd *copy_cmd;
        struct sk_buff *skb;
        struct iwl_cmd_meta meta;
 };
index d1a61ba..4983720 100644 (file)
@@ -421,13 +421,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
+               if (reclaim) {
+                       struct iwl_pcie_tx_queue_entry *ent;
+                       ent = &txq->entries[cmd_index];
+                       cmd = ent->copy_cmd;
+                       WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
+               } else {
                        cmd = NULL;
+               }
 
                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
+               if (reclaim) {
+                       /* The original command isn't needed any more */
+                       kfree(txq->entries[cmd_index].copy_cmd);
+                       txq->entries[cmd_index].copy_cmd = NULL;
+               }
+
                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
index 1e86ea2..8488511 100644 (file)
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
        iwl_tx_queue_unmap(trans, txq_id);
 
        /* De-alloc array of command/tx buffers */
-
        if (txq_id == trans_pcie->cmd_queue)
-               for (i = 0; i < txq->q.n_window; i++)
+               for (i = 0; i < txq->q.n_window; i++) {
                        kfree(txq->entries[i].cmd);
+                       kfree(txq->entries[i].copy_cmd);
+               }
 
        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
@@ -896,6 +897,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
 static int iwl_prepare_card_hw(struct iwl_trans *trans)
 {
        int ret;
+       int t = 0;
 
        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -908,17 +910,15 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PREPARE);
 
-       ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-                          ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-                          CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+       do {
+               ret = iwl_set_hw_ready(trans);
+               if (ret >= 0)
+                       return 0;
 
-       if (ret < 0)
-               return ret;
+               usleep_range(200, 1000);
+               t += 200;
+       } while (t < 150000);
 
-       /* HW should be ready by now, check again. */
-       ret = iwl_set_hw_ready(trans);
-       if (ret >= 0)
-               return 0;
        return ret;
 }
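
A minimal user-space sketch of the bounded-poll pattern the hunk above switches to (illustrative only; hw_ready() below is a stand-in, not the real iwl_set_hw_ready()): retry the readiness probe with a short sleep until it succeeds or roughly 150 ms of budget has been burned.

#include <stdio.h>
#include <unistd.h>

/* Stand-in readiness probe; pretend the hardware becomes ready on call 5. */
static int hw_ready(void)
{
    static int calls;
    return ++calls >= 5 ? 0 : -1;
}

int main(void)
{
    int ret = -1;
    int t = 0; /* budget spent, in microseconds */

    do {
        ret = hw_ready();
        if (ret >= 0)
            break;
        usleep(200);    /* the driver uses usleep_range(200, 1000) here */
        t += 200;
    } while (t < 150000);

    printf("ready: %s after ~%d us\n", ret >= 0 ? "yes" : "no", t);
    return 0;
}
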
 
@@ -1771,7 +1771,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
 #define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
        if (!debugfs_create_file(#name, mode, parent, trans,            \
                                 &iwl_dbgfs_##name##_ops))              \
-               return -ENOMEM;                                         \
+               goto err;                                               \
 } while (0)
 
 /* file operation */
@@ -2035,6 +2035,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
        return 0;
+
+err:
+       IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+       return -ENOMEM;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
index 6baf8de..392d2bc 100644 (file)
@@ -521,7 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        u16 copy_size, cmd_size;
        bool had_nocopy = false;
        int i;
-       u8 *cmd_dest;
+       u32 cmd_pos;
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
@@ -584,15 +584,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
                                         INDEX_TO_SEQ(q->write_ptr));
 
        /* and copy the data that needs to be copied */
-
-       cmd_dest = out_cmd->payload;
+       cmd_pos = offsetof(struct iwl_device_cmd, payload);
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
-               memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
-               cmd_dest += cmd->len[i];
+               memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
+               cmd_pos += cmd->len[i];
+       }
+
+       WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+
+       /*
+        * Since out_cmd will be the source address for the FH, the FH will
+        * write the retry count there. So when the user needs to receive the
+        * HCMD that corresponds to the response in the response handler, it needs
+        * to set CMD_WANT_HCMD.
+        */
+       if (cmd->flags & CMD_WANT_HCMD) {
+               txq->entries[idx].copy_cmd =
+                       kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
+               if (unlikely(!txq->entries[idx].copy_cmd)) {
+                       idx = -ENOMEM;
+                       goto out;
+               }
        }
 
        IWL_DEBUG_HC(trans,
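
Illustrative only, with made-up names: the change above kmemdup()s the host command at enqueue time when CMD_WANT_HCMD is set, hands that copy to the response handler, and frees it right after the handler returns, because the original buffer is the DMA source and gets modified in flight. A rough user-space analogue of that conditional copy-and-free lifetime:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WANT_ECHO 0x1 /* stand-in for CMD_WANT_HCMD */

static void response_handler(const char *echoed_cmd)
{
    printf("handler saw: %s\n", echoed_cmd ? echoed_cmd : "(no copy requested)");
}

int main(void)
{
    char dma_buf[32] = "ADD_STA"; /* pretend this is the in-flight command buffer */
    unsigned int flags = WANT_ECHO;
    char *copy = NULL;

    if (flags & WANT_ECHO) {
        copy = malloc(sizeof(dma_buf)); /* kmemdup() equivalent */
        if (!copy)
            return 1;
        memcpy(copy, dma_buf, sizeof(dma_buf));
    }

    memset(dma_buf, 0, sizeof(dma_buf)); /* the hardware may scribble on the original */
    response_handler(copy);              /* only the copy is still valid here */
    free(copy);                          /* dropped once the handler is done */
    return 0;
}
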
index a034572..7001856 100644 (file)
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
        lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct lbtf_private *priv = hw->priv;
 
index 0083839..72b0456 100644 (file)
@@ -709,7 +709,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
        return ack;
 }
 
-static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+                             struct ieee80211_tx_control *control,
+                             struct sk_buff *skb)
 {
        bool ack;
        struct ieee80211_tx_info *txi;
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
 #endif
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
+       { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
                        BIT(NL80211_IFTYPE_P2P_GO) |
                        BIT(NL80211_IFTYPE_ADHOC) |
-                       BIT(NL80211_IFTYPE_MESH_POINT);
+                       BIT(NL80211_IFTYPE_MESH_POINT) |
+                       BIT(NL80211_IFTYPE_P2P_DEVICE);
 
                hw->flags = IEEE80211_HW_MFP_CAPABLE |
                            IEEE80211_HW_SIGNAL_DBM |
index e535c93..d273273 100644 (file)
@@ -726,3 +726,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
 
        return count;
 }
+
+/*
+ * This function retrieves the tx BA stream table entry that matches the given
+ * RA and deletes it.
+ */
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
+{
+       struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
+       unsigned long flags;
+
+       if (!ra)
+               return;
+
+       spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+       list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
+               if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
+                                              flags);
+                       mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+
+       return;
+}
index 28366e9..67c087c 100644 (file)
@@ -69,6 +69,7 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
 int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
                                int cmd_action,
                                struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
 
 /*
  * This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +158,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
 
        return false;
 }
+
+/*
+ * This function checks whether the associated station is 11n enabled
+ */
+static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
+                                            struct mwifiex_sta_node *node)
+{
+
+       if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
+           !priv->ap_11n_enabled)
+               return 0;
+
+       return node->is_11n_enabled;
+}
 #endif /* !_MWIFIEX_11N_H_ */
index ab84eb9..395f1bf 100644 (file)
@@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
        };
        struct tx_packet_hdr *tx_header;
 
-       skb_put(skb_aggr, sizeof(*tx_header));
-
-       tx_header = (struct tx_packet_hdr *) skb_aggr->data;
+       tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
 
        /* Copy DA and SA */
        dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
        tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
 
        /* Add payload */
-       skb_put(skb_aggr, skb_src->len);
-       memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
-              skb_src->len);
-       *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
-                                                     LLC_SNAP_LEN)) & 3)) : 0;
-       skb_put(skb_aggr, *pad);
+       memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
+
+       /* Pad so that the next MSDU starts on a 4-byte boundary */
+       *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
 
        return skb_aggr->len + *pad;
 }
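
The new padding expression above rounds the aggregate up so the next MSDU starts on a 4-byte boundary. A tiny stand-alone check of that arithmetic, with assumed tail offsets rather than real skb state:

#include <stdio.h>

int main(void)
{
    unsigned long tails[] = { 100, 101, 102, 103, 104 };
    unsigned int i;

    for (i = 0; i < sizeof(tails) / sizeof(tails[0]); i++) {
        /* Same expression as the patch: bytes needed to reach the next 4-byte boundary. */
        int pad = (4 - (tails[i] & 0x3)) % 4;
        printf("tail at %lu -> pad %d\n", tails[i], pad);
    }
    return 0;
}
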
index 591ccd3..24e2582 100644 (file)
@@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
                        tbl->rx_reorder_ptr[i] = NULL;
                }
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               if (rx_tmp_ptr)
-                       mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+               if (rx_tmp_ptr) {
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+                       else
+                               mwifiex_process_rx_packet(priv->adapter,
+                                                         rx_tmp_ptr);
+               }
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+
+               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                       mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+               else
+                       mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
  */
-static struct mwifiex_rx_reorder_tbl *
+struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
        struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
        return NULL;
 }
 
+/* This function retrieves the pointer to an entry in Rx reordering
+ * table which matches the given TA and deletes it.
+ */
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+{
+       struct mwifiex_rx_reorder_tbl *tbl, *tmp;
+       unsigned long flags;
+
+       if (!ta)
+               return;
+
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
+                       mwifiex_del_rx_reorder_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+       return;
+}
+
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        struct mwifiex_rx_reorder_tbl *tbl, *new_node;
        u16 last_seq = 0;
        unsigned long flags;
+       struct mwifiex_sta_node *node;
 
        /*
         * If we get a TID, ta pair which is already present dispatch all the
@@ -248,13 +283,19 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        new_node->tid = tid;
        memcpy(new_node->ta, ta, ETH_ALEN);
        new_node->start_win = seq_num;
-       if (mwifiex_queuing_ra_based(priv))
-               /* TODO for adhoc */
+
+       if (mwifiex_queuing_ra_based(priv)) {
                dev_dbg(priv->adapter->dev,
-                       "info: ADHOC:last_seq=%d start_win=%d\n",
+                       "info: AP/ADHOC:last_seq=%d start_win=%d\n",
                        last_seq, new_node->start_win);
-       else
+               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
+                       node = mwifiex_get_sta_entry(priv, ta);
+                       if (node)
+                               last_seq = node->rx_seq[tid];
+               }
+       } else {
                last_seq = priv->rx_seq[tid];
+       }
 
        if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
            last_seq >= new_node->start_win)
@@ -396,8 +437,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
-               if (pkt_type != PKT_TYPE_BAR)
-                       mwifiex_process_rx_packet(priv->adapter, payload);
+               if (pkt_type != PKT_TYPE_BAR) {
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               mwifiex_handle_uap_rx_forward(priv, payload);
+                       else
+                               mwifiex_process_rx_packet(priv->adapter,
+                                                         payload);
+               }
                return 0;
        }
        start_win = tbl->start_win;
index 6c9815a..7284859 100644 (file)
@@ -38,6 +38,8 @@
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
 #define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+#define BA_SETUP_MAX_PACKET_THRESHOLD  16
+#define BA_SETUP_PACKET_OFFSET         16
 
 static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
 {
@@ -68,5 +70,8 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
                                                           mwifiex_private
                                                           *priv, int tid,
                                                           u8 *ta);
+struct mwifiex_rx_reorder_tbl *
+mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
 
 #endif /* _MWIFIEX_11N_RXREORDER_H_ */
index 3f66ebb..dd0410d 100644 (file)
@@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o
 mwifiex-y += ie.o
 mwifiex-y += sta_cmdresp.o
 mwifiex-y += sta_event.o
+mwifiex-y += uap_event.o
 mwifiex-y += sta_tx.o
 mwifiex-y += sta_rx.o
+mwifiex-y += uap_txrx.o
 mwifiex-y += cfg80211.o
 mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_MWIFIEX) += mwifiex.o
index fe42137..e57f543 100644 (file)
@@ -99,7 +99,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
        const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
 
-       if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
+       if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
                wiphy_err(wiphy, "deleting the crypto keys\n");
                return -EFAULT;
        }
@@ -171,7 +171,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
 
        if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
                priv->wep_key_curr_index = key_index;
-       } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+       } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
+                                     NULL, 0)) {
                wiphy_err(wiphy, "set default Tx key index\n");
                return -EFAULT;
        }
@@ -207,7 +208,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
                return 0;
        }
 
-       if (mwifiex_set_encode(priv, params->key, params->key_len,
+       if (mwifiex_set_encode(priv, params, params->key, params->key_len,
                               key_index, peer_mac, 0)) {
                wiphy_err(wiphy, "crypto keys added\n");
                return -EFAULT;
@@ -748,6 +749,7 @@ static const u32 mwifiex_cipher_suites[] = {
        WLAN_CIPHER_SUITE_WEP104,
        WLAN_CIPHER_SUITE_TKIP,
        WLAN_CIPHER_SUITE_CCMP,
+       WLAN_CIPHER_SUITE_AES_CMAC,
 };
 
 /*
@@ -906,6 +908,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        if (mwifiex_del_mgmt_ies(priv))
                wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
 
+       priv->ap_11n_enabled = 0;
+
        if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1159,7 +1163,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
        priv->wep_key_curr_index = 0;
        priv->sec_info.encryption_mode = 0;
        priv->sec_info.is_authtype_auto = 0;
-       ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
+       ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
 
        if (mode == NL80211_IFTYPE_ADHOC) {
                /* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1210,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
                                "info: setting wep encryption"
                                " with key len %d\n", sme->key_len);
                        priv->wep_key_curr_index = sme->key_idx;
-                       ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
-                                                sme->key_idx, NULL, 0);
+                       ret = mwifiex_set_encode(priv, NULL, sme->key,
+                                                sme->key_len, sme->key_idx,
+                                                NULL, 0);
                }
        }
 done:
index c68adec..c229ddd 100644 (file)
@@ -447,7 +447,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
                        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
        }
 
-       ret = mwifiex_process_sta_event(priv);
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               ret = mwifiex_process_uap_event(priv);
+       else
+               ret = mwifiex_process_sta_event(priv);
 
        adapter->event_cause = 0;
        adapter->event_skb = NULL;
index 070ef25..400d360 100644 (file)
@@ -60,6 +60,9 @@
 #define MWIFIEX_SDIO_BLOCK_SIZE            256
 
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT      BIT(0)
+#define MWIFIEX_BUF_FLAG_BRIDGED_PKT      BIT(1)
+
+#define MWIFIEX_BRIDGED_PKTS_THRESHOLD     1024
 
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
index e831b44..ae06f31 100644 (file)
@@ -65,10 +65,12 @@ enum KEY_TYPE_ID {
        KEY_TYPE_ID_TKIP,
        KEY_TYPE_ID_AES,
        KEY_TYPE_ID_WAPI,
+       KEY_TYPE_ID_AES_CMAC,
 };
 #define KEY_MCAST      BIT(0)
 #define KEY_UNICAST    BIT(1)
 #define KEY_ENABLED    BIT(2)
+#define KEY_IGTK       BIT(10)
 
 #define WAPI_KEY_LEN                   50
 
@@ -424,10 +426,10 @@ struct txpd {
 struct rxpd {
        u8 bss_type;
        u8 bss_num;
-       u16 rx_pkt_length;
-       u16 rx_pkt_offset;
-       u16 rx_pkt_type;
-       u16 seq_num;
+       __le16 rx_pkt_length;
+       __le16 rx_pkt_offset;
+       __le16 rx_pkt_type;
+       __le16 seq_num;
        u8 priority;
        u8 rx_rate;
        s8 snr;
@@ -439,6 +441,31 @@ struct rxpd {
        u8 reserved;
 } __packed;
 
+struct uap_txpd {
+       u8 bss_type;
+       u8 bss_num;
+       __le16 tx_pkt_length;
+       __le16 tx_pkt_offset;
+       __le16 tx_pkt_type;
+       __le32 tx_control;
+       u8 priority;
+       u8 flags;
+       u8 pkt_delay_2ms;
+       u8 reserved1;
+       __le32 reserved2;
+};
+
+struct uap_rxpd {
+       u8 bss_type;
+       u8 bss_num;
+       __le16 rx_pkt_length;
+       __le16 rx_pkt_offset;
+       __le16 rx_pkt_type;
+       __le16 seq_num;
+       u8 priority;
+       u8 reserved1;
+};
+
 enum mwifiex_chan_scan_mode_bitmasks {
        MWIFIEX_PASSIVE_SCAN = BIT(0),
        MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +585,13 @@ struct mwifiex_ie_type_key_param_set {
        u8 key[50];
 } __packed;
 
+#define IGTK_PN_LEN            8
+
+struct mwifiex_cmac_param {
+       u8 ipn[IGTK_PN_LEN];
+       u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
 struct host_cmd_ds_802_11_key_material {
        __le16 action;
        struct mwifiex_ie_type_key_param_set key_param_set;
index 21fdc6c..fad2c8d 100644 (file)
@@ -64,60 +64,72 @@ static void scan_delay_timer_fn(unsigned long data)
        struct cmd_ctrl_node *cmd_node, *tmp_node;
        unsigned long flags;
 
-       if (!mwifiex_wmm_lists_empty(adapter)) {
-               if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+               /*
+                * Abort scan operation by cancelling all pending scan
+                * commands
+                */
+               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+               list_for_each_entry_safe(cmd_node, tmp_node,
+                                        &adapter->scan_pending_q, list) {
+                       list_del(&cmd_node->list);
+                       cmd_node->wait_q_enabled = false;
+                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               }
+               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+               adapter->scan_processing = false;
+               adapter->scan_delay_cnt = 0;
+               adapter->empty_tx_q_cnt = 0;
+               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+               if (priv->user_scan_cfg) {
+                       dev_dbg(priv->adapter->dev,
+                               "info: %s: scan aborted\n", __func__);
+                       cfg80211_scan_done(priv->scan_request, 1);
+                       priv->scan_request = NULL;
+                       kfree(priv->user_scan_cfg);
+                       priv->user_scan_cfg = NULL;
+               }
+               goto done;
+       }
+
+       if (!atomic_read(&priv->adapter->is_tx_received)) {
+               adapter->empty_tx_q_cnt++;
+               if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
                        /*
-                        * Abort scan operation by cancelling all pending scan
-                        * command
+                        * No Tx traffic for 200 msec. Move the scan command
+                        * from the scan pending queue to the cmd pending queue
+                        * to resume the scan operation.
                         */
+                       adapter->scan_delay_cnt = 0;
+                       adapter->empty_tx_q_cnt = 0;
                        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-                       list_for_each_entry_safe(cmd_node, tmp_node,
-                                                &adapter->scan_pending_q,
-                                                list) {
-                               list_del(&cmd_node->list);
-                               cmd_node->wait_q_enabled = false;
-                               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-                       }
+                       cmd_node = list_first_entry(&adapter->scan_pending_q,
+                                                   struct cmd_ctrl_node, list);
+                       list_del(&cmd_node->list);
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
 
-                       spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-                       adapter->scan_processing = false;
-                       spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
-                                              flags);
-
-                       if (priv->user_scan_cfg) {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: %s: scan aborted\n", __func__);
-                               cfg80211_scan_done(priv->scan_request, 1);
-                               priv->scan_request = NULL;
-                               kfree(priv->user_scan_cfg);
-                               priv->user_scan_cfg = NULL;
-                       }
-               } else {
-                       /*
-                        * Tx data queue is still not empty, delay scan
-                        * operation further by 20msec.
-                        */
-                       mod_timer(&priv->scan_delay_timer, jiffies +
-                                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
-                       adapter->scan_delay_cnt++;
+                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+                                                       true);
+                       goto done;
                }
-               queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
        } else {
-               /*
-                * Tx data queue is empty. Get scan command from scan_pending_q
-                * and put to cmd_pending_q to resume scan operation
-                */
-               adapter->scan_delay_cnt = 0;
-               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-               cmd_node = list_first_entry(&adapter->scan_pending_q,
-                                           struct cmd_ctrl_node, list);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
-               mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+               adapter->empty_tx_q_cnt = 0;
        }
+
+       /* Delay scan operation further by 20msec */
+       mod_timer(&priv->scan_delay_timer, jiffies +
+                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+       adapter->scan_delay_cnt++;
+
+done:
+       if (atomic_read(&priv->adapter->is_tx_received))
+               atomic_set(&priv->adapter->is_tx_received, false);
+
+       return;
 }
 
 /*
@@ -196,6 +208,7 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
        priv->curr_bcn_size = 0;
        priv->wps_ie = NULL;
        priv->wps_ie_len = 0;
+       priv->ap_11n_enabled = 0;
 
        priv->scan_block = false;
 
@@ -345,6 +358,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+       adapter->empty_tx_q_cnt = 0;
 }
 
 /*
@@ -410,6 +424,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
                                list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
                        list_del(&priv->tx_ba_stream_tbl_ptr);
                        list_del(&priv->rx_reorder_tbl_ptr);
+                       list_del(&priv->sta_list);
                }
        }
 }
@@ -472,6 +487,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                        spin_lock_init(&priv->rx_pkt_lock);
                        spin_lock_init(&priv->wmm.ra_list_spinlock);
                        spin_lock_init(&priv->curr_bcn_buf_lock);
+                       spin_lock_init(&priv->sta_list_spinlock);
                }
        }
 
@@ -504,6 +520,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                }
                INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
                INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+               INIT_LIST_HEAD(&priv->sta_list);
 
                spin_lock_init(&priv->tx_ba_stream_tbl_lock);
                spin_lock_init(&priv->rx_reorder_tbl_lock);
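The reworked scan_delay_timer_fn() in the hunks above is effectively a small per-tick state machine: the timer re-arms every MWIFIEX_SCAN_DELAY_MSEC (20 ms) while Tx traffic keeps flowing, ten consecutive ticks without Tx (MWIFIEX_MAX_EMPTY_TX_Q_CNT, about 200 ms) resume the next queued scan command, and once MWIFIEX_MAX_SCAN_DELAY_CNT (50) delay ticks accumulate without a resume the pending scan is aborted. The sketch below is a hypothetical, standalone user-space model of just that policy, not driver code; scan_delay_tick(), the enum and main() are illustrative names only.

/* Hypothetical stand-alone model of the scan-delay policy in the hunk
 * above; the constants mirror the driver's 20 ms timer tick. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCAN_DELAY_CNT      50      /* ~1 s of delay ticks: abort the scan */
#define MAX_EMPTY_TX_Q_CNT      10      /* ~200 ms without Tx: resume the scan */

enum scan_action { SCAN_WAIT, SCAN_RESUME, SCAN_ABORT };

struct scan_delay_state {
        unsigned int scan_delay_cnt;
        unsigned int empty_tx_q_cnt;
};

/* One 20 ms timer tick; tx_seen mirrors adapter->is_tx_received. */
static enum scan_action scan_delay_tick(struct scan_delay_state *s, bool tx_seen)
{
        if (s->scan_delay_cnt == MAX_SCAN_DELAY_CNT) {
                s->scan_delay_cnt = 0;
                s->empty_tx_q_cnt = 0;
                return SCAN_ABORT;      /* cancel all pending scan commands */
        }

        if (tx_seen) {
                s->empty_tx_q_cnt = 0;  /* traffic still flowing, keep waiting */
        } else if (++s->empty_tx_q_cnt == MAX_EMPTY_TX_Q_CNT) {
                s->scan_delay_cnt = 0;
                s->empty_tx_q_cnt = 0;
                return SCAN_RESUME;     /* requeue the next scan command */
        }

        s->scan_delay_cnt++;            /* re-arm the 20 ms timer */
        return SCAN_WAIT;
}

int main(void)
{
        struct scan_delay_state s = { 0, 0 };
        int i;

        /* Ten idle ticks in a row resume the scan after roughly 200 ms. */
        for (i = 0; i < 10; i++)
                if (scan_delay_tick(&s, false) == SCAN_RESUME)
                        printf("scan resumed after %d idle ticks\n", i + 1);
        return 0;
}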
index 5019153..6a5eded 100644 (file)
@@ -213,7 +213,7 @@ struct mwifiex_debug_info {
 };
 
 #define MWIFIEX_KEY_INDEX_UNICAST      0x40000000
-#define WAPI_RXPN_LEN                  16
+#define PN_LEN                         16
 
 struct mwifiex_ds_encrypt_key {
        u32 key_disable;
@@ -222,7 +222,8 @@ struct mwifiex_ds_encrypt_key {
        u8 key_material[WLAN_MAX_KEY_LEN];
        u8 mac_addr[ETH_ALEN];
        u32 is_wapi_key;
-       u8 wapi_rxpn[WAPI_RXPN_LEN];
+       u8 pn[PN_LEN];          /* packet number */
+       u8 is_igtk_key;
 };
 
 struct mwifiex_power_cfg {
index 4680362..cb11552 100644 (file)
@@ -520,6 +520,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        mwifiex_wmm_add_buf_txqueue(priv, skb);
        atomic_inc(&priv->adapter->tx_pending);
 
+       if (priv->adapter->scan_delay_cnt)
+               atomic_set(&priv->adapter->is_tx_received, true);
+
        if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
                mwifiex_set_trans_start(dev);
                mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
index e7c2a82..994bc4f 100644 (file)
@@ -88,6 +88,7 @@ enum {
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME    (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
 #define MWIFIEX_MAX_SCAN_DELAY_CNT                     50
+#define MWIFIEX_MAX_EMPTY_TX_Q_CNT                     10
 #define MWIFIEX_SCAN_DELAY_MSEC                                20
 
 #define RSN_GTK_OUI_OFFSET                             2
@@ -199,6 +200,9 @@ struct mwifiex_ra_list_tbl {
        u8 ra[ETH_ALEN];
        u32 total_pkts_size;
        u32 is_11n_enabled;
+       u16 max_amsdu;
+       u16 pkt_count;
+       u8 ba_packet_thr;
 };
 
 struct mwifiex_tid_tbl {
@@ -431,6 +435,9 @@ struct mwifiex_private {
        u8 wmm_enabled;
        u8 wmm_qosinfo;
        struct mwifiex_wmm_desc wmm;
+       struct list_head sta_list;
+       /* spin lock for associated station list */
+       spinlock_t sta_list_spinlock;
        struct list_head tx_ba_stream_tbl_ptr;
        /* spin lock for tx_ba_stream_tbl_ptr queue */
        spinlock_t tx_ba_stream_tbl_lock;
@@ -486,6 +493,7 @@ struct mwifiex_private {
        u16 assocresp_idx;
        u16 rsn_idx;
        struct timer_list scan_delay_timer;
+       u8 ap_11n_enabled;
 };
 
 enum mwifiex_ba_status {
@@ -550,6 +558,19 @@ struct mwifiex_bss_priv {
        u64 fw_tsf;
 };
 
+/* This is an AP-specific structure which stores information
+ * about an associated STA.
+ */
+struct mwifiex_sta_node {
+       struct list_head list;
+       u8 mac_addr[ETH_ALEN];
+       u8 is_wmm_enabled;
+       u8 is_11n_enabled;
+       u8 ampdu_sta[MAX_NUM_TID];
+       u16 rx_seq[MAX_NUM_TID];
+       u16 max_amsdu;
+};
+
 struct mwifiex_if_ops {
        int (*init_if) (struct mwifiex_adapter *);
        void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +711,9 @@ struct mwifiex_adapter {
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        u16 max_mgmt_ie_index;
        u8 scan_delay_cnt;
+       u8 empty_tx_q_cnt;
+       atomic_t is_tx_received;
+       atomic_t pending_bridged_pkts;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -780,7 +804,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
                                struct host_cmd_ds_command *resp);
 int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
                                  struct sk_buff *skb);
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+                                 struct sk_buff *skb);
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+                                 struct sk_buff *skb);
 int mwifiex_process_sta_event(struct mwifiex_private *);
+int mwifiex_process_uap_event(struct mwifiex_private *);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
 void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
 int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
 int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
@@ -949,9 +981,9 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
                          const struct mwifiex_user_scan_cfg *user_scan_in);
 int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
 
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-                      int key_len, u8 key_index, const u8 *mac_addr,
-                      int disable);
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+                      const u8 *key, int key_len, u8 key_index,
+                      const u8 *mac_addr, int disable);
 
 int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
 
index 04dc7ca..215d07e 100644 (file)
@@ -989,6 +989,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        *max_chan_per_scan = 2;
                else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
                        *max_chan_per_scan = 3;
+               else
+                       *max_chan_per_scan = 4;
        }
 }
 
@@ -1433,9 +1435,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
                        if (ret)
                                dev_err(priv->adapter->dev, "cannot find ssid "
                                        "%s\n", bss_desc->ssid.ssid);
-                               break;
+                       break;
                default:
-                               ret = 0;
+                       ret = 0;
                }
        }
 
index df3a33c..0cc3406 100644 (file)
@@ -610,7 +610,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                memcpy(&key_material->key_param_set.key[2],
                       enc_key->key_material, enc_key->key_len);
                memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
-                      enc_key->wapi_rxpn, WAPI_RXPN_LEN);
+                      enc_key->pn, PN_LEN);
                key_material->key_param_set.length =
                        cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
 
@@ -621,23 +621,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                return ret;
        }
        if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
-               dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
-               key_material->key_param_set.key_type_id =
+               if (enc_key->is_igtk_key) {
+                       dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+                       key_material->key_param_set.key_type_id =
+                                       cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
+                       if (cmd_oid == KEY_INFO_ENABLED)
+                               key_material->key_param_set.key_info =
+                                               cpu_to_le16(KEY_ENABLED);
+                       else
+                               key_material->key_param_set.key_info =
+                                               cpu_to_le16(!KEY_ENABLED);
+
+                       key_material->key_param_set.key_info |=
+                                                       cpu_to_le16(KEY_IGTK);
+               } else {
+                       dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+                       key_material->key_param_set.key_type_id =
                                                cpu_to_le16(KEY_TYPE_ID_AES);
-               if (cmd_oid == KEY_INFO_ENABLED)
-                       key_material->key_param_set.key_info =
+                       if (cmd_oid == KEY_INFO_ENABLED)
+                               key_material->key_param_set.key_info =
                                                cpu_to_le16(KEY_ENABLED);
-               else
-                       key_material->key_param_set.key_info =
+                       else
+                               key_material->key_param_set.key_info =
                                                cpu_to_le16(!KEY_ENABLED);
 
-               if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+                       if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
                                /* AES pairwise key: unicast */
-                       key_material->key_param_set.key_info |=
+                               key_material->key_param_set.key_info |=
                                                cpu_to_le16(KEY_UNICAST);
-               else            /* AES group key: multicast */
-                       key_material->key_param_set.key_info |=
+                       else    /* AES group key: multicast */
+                               key_material->key_param_set.key_info |=
                                                        cpu_to_le16(KEY_MCAST);
+               }
        } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
                dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
                key_material->key_param_set.key_type_id =
@@ -668,6 +683,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
                                + sizeof(struct mwifiex_ie_types_header);
 
+               if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
+                                                       KEY_TYPE_ID_AES_CMAC) {
+                       struct mwifiex_cmac_param *param =
+                                       (void *)key_material->key_param_set.key;
+
+                       memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
+                       memcpy(param->key, enc_key->key_material,
+                              WLAN_KEY_LEN_AES_CMAC);
+
+                       key_param_len = sizeof(struct mwifiex_cmac_param);
+                       key_material->key_param_set.key_len =
+                                               cpu_to_le16(key_param_len);
+                       key_param_len += KEYPARAMSET_FIXED_LEN;
+                       key_material->key_param_set.length =
+                                               cpu_to_le16(key_param_len);
+                       key_param_len += sizeof(struct mwifiex_ie_types_header);
+               }
+
                cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
                                        + key_param_len);
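The AES-CMAC (IGTK) branch above builds the key TLV length in three steps: key_len covers only the mwifiex_cmac_param payload (the 8-byte IPN plus the CMAC key), length additionally covers the fixed key-parameter-set fields, and the value finally folded into cmd->size also includes the TLV header. A minimal worked example of that arithmetic follows; WLAN_KEY_LEN_AES_CMAC is taken as 16 (the 802.11 BIP key length), while KEYPARAMSET_FIXED_LEN and the TLV header size below are placeholders, since their real definitions live in driver headers not shown in this diff.

/* Hypothetical walk-through of the CMAC key TLV sizing shown above; the
 * two placeholder lengths stand in for the driver's real definitions. */
#include <stdio.h>

#define IGTK_PN_LEN             8       /* from the fw.h hunk above       */
#define WLAN_KEY_LEN_AES_CMAC   16      /* IEEE 802.11 BIP key length     */
#define KEYPARAMSET_FIXED_LEN   6       /* placeholder: fixed fields size */
#define TLV_HEADER_LEN          4       /* placeholder: type + length     */

int main(void)
{
        unsigned int key_param_len;

        /* key_len covers only the IPN + CMAC key payload */
        key_param_len = IGTK_PN_LEN + WLAN_KEY_LEN_AES_CMAC;
        printf("key_param_set.key_len       = %u\n", key_param_len);

        /* length additionally covers the fixed key-parameter-set fields */
        key_param_len += KEYPARAMSET_FIXED_LEN;
        printf("key_param_set.length        = %u\n", key_param_len);

        /* the value folded into cmd->size also includes the TLV header */
        key_param_len += TLV_HEADER_LEN;
        printf("key_param_len for cmd->size = %u\n", key_param_len);

        return 0;
}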
 
index b8614a8..dff51d5 100644 (file)
@@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
-       int len, ret = 0;
+       int ret = 0;
        u32 eventcause = adapter->event_cause;
-       struct station_info sinfo;
-       struct mwifiex_assoc_event *event;
+       u16 ctrl;
 
        switch (eventcause) {
        case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_MIC_ERR_UNICAST:
                dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+               cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+                                            NL80211_KEYTYPE_PAIRWISE,
+                                            -1, NULL, GFP_KERNEL);
                break;
 
        case EVENT_MIC_ERR_MULTICAST:
                dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+               cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+                                            NL80211_KEYTYPE_GROUP,
+                                            -1, NULL, GFP_KERNEL);
                break;
        case EVENT_MIB_CHANGED:
        case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                              adapter->event_body);
                break;
        case EVENT_AMSDU_AGGR_CTRL:
-               dev_dbg(adapter->dev, "event:  AMSDU_AGGR_CTRL %d\n",
-                       *(u16 *) adapter->event_body);
+               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
                adapter->tx_buf_size =
-                       min(adapter->curr_tx_buf_size,
-                           le16_to_cpu(*(__le16 *) adapter->event_body));
+                               min_t(u16, adapter->curr_tx_buf_size, ctrl);
                dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
                        adapter->tx_buf_size);
                break;
@@ -405,51 +410,6 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
                break;
 
-       case EVENT_UAP_STA_ASSOC:
-               memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)
-                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
-                       len = -1;
-
-                       if (ieee80211_is_assoc_req(event->frame_control))
-                               len = 0;
-                       else if (ieee80211_is_reassoc_req(event->frame_control))
-                               /* There will be ETH_ALEN bytes of
-                                * current_ap_addr before the re-assoc ies.
-                                */
-                               len = ETH_ALEN;
-
-                       if (len != -1) {
-                               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
-                               sinfo.assoc_req_ies = &event->data[len];
-                               len = (u8 *)sinfo.assoc_req_ies -
-                                     (u8 *)&event->frame_control;
-                               sinfo.assoc_req_ies_len =
-                                       le16_to_cpu(event->len) - (u16)len;
-                       }
-               }
-               cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
-                                GFP_KERNEL);
-               break;
-       case EVENT_UAP_STA_DEAUTH:
-               cfg80211_del_sta(priv->netdev, adapter->event_body +
-                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
-               break;
-       case EVENT_UAP_BSS_IDLE:
-               priv->media_connected = false;
-               break;
-       case EVENT_UAP_BSS_ACTIVE:
-               priv->media_connected = true;
-               break;
-       case EVENT_UAP_BSS_START:
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
-               memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
-               break;
-       case EVENT_UAP_MIC_COUNTERMEASURES:
-               /* For future development */
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
-               break;
        default:
                dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
                        eventcause);
index fb21360..3f02597 100644 (file)
@@ -942,20 +942,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
  * This function allocates the IOCTL request buffer, fills it
  * with requisite parameters and calls the IOCTL handler.
  */
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-                       int key_len, u8 key_index,
-                       const u8 *mac_addr, int disable)
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+                      const u8 *key, int key_len, u8 key_index,
+                      const u8 *mac_addr, int disable)
 {
        struct mwifiex_ds_encrypt_key encrypt_key;
 
        memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
        encrypt_key.key_len = key_len;
+
+       if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               encrypt_key.is_igtk_key = true;
+
        if (!disable) {
                encrypt_key.key_index = key_index;
                if (key_len)
                        memcpy(encrypt_key.key_material, key, key_len);
                if (mac_addr)
                        memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
+               if (kp && kp->seq && kp->seq_len)
+                       memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
        } else {
                encrypt_key.key_disable = true;
                if (mac_addr)
index 02ce3b7..d91d5c0 100644 (file)
@@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
 
        local_rx_pd = (struct rxpd *) (skb->data);
 
-       rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-                               local_rx_pd->rx_pkt_offset);
+       rx_pkt_hdr = (void *)local_rx_pd +
+                    le16_to_cpu(local_rx_pd->rx_pkt_offset);
 
        if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
                    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
        struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
        struct rx_packet_hdr *rx_pkt_hdr;
        u8 ta[ETH_ALEN];
-       u16 rx_pkt_type;
+       u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
        struct mwifiex_private *priv =
                        mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
                                               rx_info->bss_type);
@@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                return -1;
 
        local_rx_pd = (struct rxpd *) (skb->data);
-       rx_pkt_type = local_rx_pd->rx_pkt_type;
+       rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
+       rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+       rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
+       seq_num = le16_to_cpu(local_rx_pd->seq_num);
 
-       rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-                                       local_rx_pd->rx_pkt_offset);
+       rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
 
-       if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
-           (u16) skb->len) {
-               dev_err(adapter->dev, "wrong rx packet: len=%d,"
-                       " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
-                      local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
+       if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
+               dev_err(adapter->dev,
+                       "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+                       skb->len, rx_pkt_offset, rx_pkt_length);
                priv->stats.rx_dropped++;
 
                if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                return ret;
        }
 
-       if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
+       if (rx_pkt_type == PKT_TYPE_AMSDU) {
                struct sk_buff_head list;
                struct sk_buff *rx_skb;
 
                __skb_queue_head_init(&list);
 
-               skb_pull(skb, local_rx_pd->rx_pkt_offset);
-               skb_trim(skb, local_rx_pd->rx_pkt_length);
+               skb_pull(skb, rx_pkt_offset);
+               skb_trim(skb, rx_pkt_length);
 
                ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
                                         priv->wdev->iftype, 0, false);
@@ -189,17 +190,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
        } else {
                if (rx_pkt_type != PKT_TYPE_BAR)
-                       priv->rx_seq[local_rx_pd->priority] =
-                                               local_rx_pd->seq_num;
+                       priv->rx_seq[local_rx_pd->priority] = seq_num;
                memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
                       ETH_ALEN);
        }
 
        /* Reorder and send to OS */
-       ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
-                                            local_rx_pd->priority, ta,
-                                            (u8) local_rx_pd->rx_pkt_type,
-                                            skb);
+       ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
+                                        ta, (u8) rx_pkt_type, skb);
 
        if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
                if (adapter->if_ops.data_complete)
index cecb272..985073d 100644 (file)
@@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
        rx_info->bss_num = priv->bss_num;
        rx_info->bss_type = priv->bss_type;
 
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               return mwifiex_process_uap_rx_packet(adapter, skb);
+
        return mwifiex_process_sta_rx_packet(adapter, skb);
 }
 EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -157,6 +160,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                priv->stats.tx_errors++;
        }
 
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+               atomic_dec_return(&adapter->pending_bridged_pkts);
        if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
                goto done;
 
index f40e93f..c10aac0 100644 (file)
@@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        if (ht_ie) {
                memcpy(&bss_cfg->ht_cap, ht_ie + 2,
                       sizeof(struct ieee80211_ht_cap));
+               priv->ap_11n_enabled = 1;
        } else {
                memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
                bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644 (file)
index 0000000..a33fa39
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Marvell Wireless LAN device driver: AP event handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "main.h"
+#include "11n.h"
+
+/*
+ * This function will return a pointer to the station entry in the
+ * station list table which matches the specified MAC address.
+ * It should be called after acquiring the RA list spinlock.
+ * NULL is returned if the entry is not found in the associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+
+       if (!mac)
+               return NULL;
+
+       list_for_each_entry(node, &priv->sta_list, list) {
+               if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+                       return node;
+       }
+
+       return NULL;
+}
+
+/*
+ * This function will add a sta_node entry to the associated station
+ * list table with the given MAC address.
+ * If the entry already exists, the existing entry is returned.
+ * If the received MAC address is NULL, NULL is returned.
+ */
+static struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       if (!mac)
+               return NULL;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node)
+               goto done;
+
+       node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
+       if (!node)
+               goto done;
+
+       memcpy(node->mac_addr, mac, ETH_ALEN);
+       list_add_tail(&node->list, &priv->sta_list);
+
+done:
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return node;
+}
+
+/*
+ * This function will search for the HT IE in the association request
+ * IEs and set the station's HT parameters accordingly.
+ */
+static void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+                      int ies_len, struct mwifiex_sta_node *node)
+{
+       const struct ieee80211_ht_cap *ht_cap;
+
+       if (!ies)
+               return;
+
+       ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+       if (ht_cap) {
+               node->is_11n_enabled = 1;
+               node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+                                 IEEE80211_HT_CAP_MAX_AMSDU ?
+                                 MWIFIEX_TX_DATA_BUF_SIZE_8K :
+                                 MWIFIEX_TX_DATA_BUF_SIZE_4K;
+       } else {
+               node->is_11n_enabled = 0;
+       }
+
+       return;
+}
+
+/*
+ * This function will delete a station entry from the station list.
+ */
+static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node) {
+               list_for_each_entry_safe(node, tmp, &priv->sta_list,
+                                        list) {
+                       list_del(&node->list);
+                       kfree(node);
+               }
+       }
+
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
+
+/*
+ * This function will delete all stations from the associated station list.
+ */
+static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *node, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       INIT_LIST_HEAD(&priv->sta_list);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
+
+/*
+ * This function handles AP interface specific events generated by firmware.
+ *
+ * Event-specific routines are called by this function based
+ * upon the generated event cause.
+ *
+ * Events supported for AP -
+ *      - EVENT_UAP_STA_ASSOC
+ *      - EVENT_UAP_STA_DEAUTH
+ *      - EVENT_UAP_BSS_ACTIVE
+ *      - EVENT_UAP_BSS_START
+ *      - EVENT_UAP_BSS_IDLE
+ *      - EVENT_UAP_MIC_COUNTERMEASURES
+ */
+int mwifiex_process_uap_event(struct mwifiex_private *priv)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       int len, i;
+       u32 eventcause = adapter->event_cause;
+       struct station_info sinfo;
+       struct mwifiex_assoc_event *event;
+       struct mwifiex_sta_node *node;
+       u8 *deauth_mac;
+       struct host_cmd_ds_11n_batimeout *ba_timeout;
+       u16 ctrl;
+
+       switch (eventcause) {
+       case EVENT_UAP_STA_ASSOC:
+               memset(&sinfo, 0, sizeof(sinfo));
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+               if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+                       len = -1;
+
+                       if (ieee80211_is_assoc_req(event->frame_control))
+                               len = 0;
+                       else if (ieee80211_is_reassoc_req(event->frame_control))
+                               /* There will be ETH_ALEN bytes of
+                                * current_ap_addr before the re-assoc ies.
+                                */
+                               len = ETH_ALEN;
+
+                       if (len != -1) {
+                               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+                               sinfo.assoc_req_ies = &event->data[len];
+                               len = (u8 *)sinfo.assoc_req_ies -
+                                     (u8 *)&event->frame_control;
+                               sinfo.assoc_req_ies_len =
+                                       le16_to_cpu(event->len) - (u16)len;
+                       }
+               }
+               cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+                                GFP_KERNEL);
+
+               node = mwifiex_add_sta_entry(priv, event->sta_addr);
+               if (!node) {
+                       dev_warn(adapter->dev,
+                                "could not create station entry!\n");
+                       return -1;
+               }
+
+               if (!priv->ap_11n_enabled)
+                       break;
+
+               mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
+                                      sinfo.assoc_req_ies_len, node);
+
+               for (i = 0; i < MAX_NUM_TID; i++) {
+                       if (node->is_11n_enabled)
+                               node->ampdu_sta[i] =
+                                             priv->aggr_prio_tbl[i].ampdu_user;
+                       else
+                               node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+               }
+               memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
+               break;
+       case EVENT_UAP_STA_DEAUTH:
+               deauth_mac = adapter->event_body +
+                            MWIFIEX_UAP_EVENT_EXTRA_HEADER;
+               cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
+
+               if (priv->ap_11n_enabled) {
+                       mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
+                       mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
+               }
+               mwifiex_del_sta_entry(priv, deauth_mac);
+               break;
+       case EVENT_UAP_BSS_IDLE:
+               priv->media_connected = false;
+               mwifiex_clean_txrx(priv);
+               mwifiex_del_all_sta_list(priv);
+               break;
+       case EVENT_UAP_BSS_ACTIVE:
+               priv->media_connected = true;
+               break;
+       case EVENT_UAP_BSS_START:
+               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
+                      ETH_ALEN);
+               break;
+       case EVENT_UAP_MIC_COUNTERMEASURES:
+               /* For future development */
+               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               break;
+       case EVENT_AMSDU_AGGR_CTRL:
+               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
+               if (priv->media_connected) {
+                       adapter->tx_buf_size =
+                               min_t(u16, adapter->curr_tx_buf_size, ctrl);
+                       dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
+                               adapter->tx_buf_size);
+               }
+               break;
+       case EVENT_ADDBA:
+               dev_dbg(adapter->dev, "event: ADDBA Request\n");
+               if (priv->media_connected)
+                       mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              adapter->event_body);
+               break;
+       case EVENT_DELBA:
+               dev_dbg(adapter->dev, "event: DELBA Request\n");
+               if (priv->media_connected)
+                       mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
+               break;
+       case EVENT_BA_STREAM_TIEMOUT:
+               dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+               if (priv->media_connected) {
+                       ba_timeout = (void *)adapter->event_body;
+                       mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
+               }
+               break;
+       default:
+               dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
+                       eventcause);
+               break;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644 (file)
index 0000000..6d814f0
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * Marvell Wireless LAN device driver: AP TX and RX data handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "ioctl.h"
+#include "main.h"
+#include "wmm.h"
+#include "11n_aggr.h"
+#include "11n_rxreorder.h"
+
+static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+                                        struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_rxpd *uap_rx_pd;
+       struct rx_packet_hdr *rx_pkt_hdr;
+       struct sk_buff *new_skb;
+       struct mwifiex_txinfo *tx_info;
+       int hdr_chop;
+       struct timeval tv;
+       u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       if ((atomic_read(&adapter->pending_bridged_pkts) >=
+                                            MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+               dev_err(priv->adapter->dev,
+                       "Tx: Bridge packet limit reached. Drop packet!\n");
+               kfree_skb(skb);
+               return;
+       }
+
+       if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
+                   rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
+               /* Chop off the rxpd + the excess memory from
+                * 802.2/llc/snap header that was removed.
+                */
+               hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
+       else
+               /* Chop off the rxpd */
+               hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
+
+       /* Chop off the leading header bytes so that it points
+        * to the start of either the reconstructed EthII frame
+        * or the 802.2/llc/snap frame.
+        */
+       skb_pull(skb, hdr_chop);
+
+       if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
+               dev_dbg(priv->adapter->dev,
+                       "data: Tx: insufficient skb headroom %d\n",
+                       skb_headroom(skb));
+               /* Insufficient skb headroom - allocate a new skb */
+               new_skb =
+                       skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+               if (unlikely(!new_skb)) {
+                       dev_err(priv->adapter->dev,
+                               "Tx: cannot allocate new_skb\n");
+                       kfree_skb(skb);
+                       priv->stats.tx_dropped++;
+                       return;
+               }
+
+               kfree_skb(skb);
+               skb = new_skb;
+               dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
+                       skb_headroom(skb));
+       }
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       tx_info->bss_num = priv->bss_num;
+       tx_info->bss_type = priv->bss_type;
+       tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       atomic_inc(&adapter->tx_pending);
+       atomic_inc(&adapter->pending_bridged_pkts);
+
+       if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
+               mwifiex_set_trans_start(priv->netdev);
+               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+       }
+       return;
+}
+
+/*
+ * This function contains the logic for AP packet forwarding.
+ *
+ * If a packet is multicast/broadcast, it is sent to the kernel/upper layer
+ * as well as queued back to the AP TX queue so that it can be sent to other
+ * associated stations.
+ * If a packet is unicast and the RA is present in the associated station
+ * list, it is requeued into the AP TX queue.
+ * If a packet is unicast and the RA is not in the associated station list,
+ * the packet is forwarded to the kernel to handle the routing logic.
+ */
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+                                 struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_rxpd *uap_rx_pd;
+       struct rx_packet_hdr *rx_pkt_hdr;
+       u8 ra[ETH_ALEN];
+       struct sk_buff *skb_uap;
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       /* don't do packet forwarding in disconnected state */
+       if (!priv->media_connected) {
+               dev_err(adapter->dev, "drop packet in disconnected state.\n");
+               dev_kfree_skb_any(skb);
+               return 0;
+       }
+
+       memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
+
+       if (is_multicast_ether_addr(ra)) {
+               skb_uap = skb_copy(skb, GFP_ATOMIC);
+               mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+       } else {
+               if (mwifiex_get_sta_entry(priv, ra)) {
+                       /* Requeue Intra-BSS packet */
+                       mwifiex_uap_queue_bridged_pkt(priv, skb);
+                       return 0;
+               }
+       }
+
+       /* Forward unicast/inter-BSS packets to the kernel. */
+       return mwifiex_process_rx_packet(adapter, skb);
+}
+
+/*
+ * This function processes the packet received on AP interface.
+ *
+ * The function looks into the RxPD and performs sanity tests on the
+ * received buffer to ensure it is a valid packet before processing it
+ * further. If the packet is determined to be aggregated, it is
+ * de-aggregated accordingly. The skb is then passed to the AP forwarding logic.
+ *
+ * The completion callback is called after processing is complete.
+ */
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+                                 struct sk_buff *skb)
+{
+       int ret;
+       struct uap_rxpd *uap_rx_pd;
+       struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+       struct rx_packet_hdr *rx_pkt_hdr;
+       u16 rx_pkt_type;
+       u8 ta[ETH_ALEN], pkt_type;
+       struct mwifiex_sta_node *node;
+
+       struct mwifiex_private *priv =
+                       mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+                                              rx_info->bss_type);
+
+       if (!priv)
+               return -1;
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+            le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
+               dev_err(adapter->dev,
+                       "wrong rx packet: len=%d, offset=%d, length=%d\n",
+                       skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+                       le16_to_cpu(uap_rx_pd->rx_pkt_length));
+               priv->stats.rx_dropped++;
+
+               if (adapter->if_ops.data_complete)
+                       adapter->if_ops.data_complete(adapter, skb);
+               else
+                       dev_kfree_skb_any(skb);
+
+               return 0;
+       }
+
+       if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+               struct sk_buff_head list;
+               struct sk_buff *rx_skb;
+
+               __skb_queue_head_init(&list);
+               skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+               skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
+
+               ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+                                        priv->wdev->iftype, 0, false);
+
+               while (!skb_queue_empty(&list)) {
+                       rx_skb = __skb_dequeue(&list);
+                       ret = mwifiex_recv_packet(adapter, rx_skb);
+                       if (ret)
+                               dev_err(adapter->dev,
+                                       "AP:Rx A-MSDU failed");
+               }
+
+               return 0;
+       }
+
+       memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+
+       if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+               node = mwifiex_get_sta_entry(priv, ta);
+               if (node)
+                       node->rx_seq[uap_rx_pd->priority] =
+                                               le16_to_cpu(uap_rx_pd->seq_num);
+       }
+
+       if (!priv->ap_11n_enabled ||
+           (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
+           (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
+               ret = mwifiex_handle_uap_rx_forward(priv, skb);
+               return ret;
+       }
+
+       /* Reorder and send to kernel */
+       pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
+       ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
+                                        uap_rx_pd->priority, ta, pkt_type,
+                                        skb);
+
+       if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
+               if (adapter->if_ops.data_complete)
+                       adapter->if_ops.data_complete(adapter, skb);
+               else
+                       dev_kfree_skb_any(skb);
+       }
+
+       if (ret)
+               priv->stats.rx_dropped++;
+
+       return ret;
+}
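mwifiex_handle_uap_rx_forward() above applies a three-way intra-BSS forwarding policy: multicast/broadcast frames are bridged back through the AP Tx queue and also delivered to the host, unicast frames whose receiver address is on the associated-station list are only re-queued, and everything else goes up to the host for normal routing. The snippet below is a hypothetical, self-contained distillation of just that decision; classify_rx() and sta_is_associated() are illustrative stand-ins, not driver API.

/* Hypothetical distillation of the AP forwarding decision implemented in
 * mwifiex_handle_uap_rx_forward() above; sta_is_associated() is a stub. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum fwd_action {
        FWD_BRIDGE_AND_HOST,    /* multicast: copy to AP Tx queue and host */
        FWD_BRIDGE_ONLY,        /* unicast to an associated STA: requeue   */
        FWD_HOST_ONLY,          /* unicast to an unknown RA: host routing  */
};

static bool is_multicast(const unsigned char *addr)
{
        return addr[0] & 0x01;  /* group bit of the destination address */
}

/* Stand-in for mwifiex_get_sta_entry() on the associated-station list. */
static bool sta_is_associated(const unsigned char *addr)
{
        static const unsigned char known[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        return memcmp(addr, known, 6) == 0;
}

static enum fwd_action classify_rx(const unsigned char *dest)
{
        if (is_multicast(dest))
                return FWD_BRIDGE_AND_HOST;
        if (sta_is_associated(dest))
                return FWD_BRIDGE_ONLY;
        return FWD_HOST_ONLY;
}

int main(void)
{
        const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const unsigned char sta[6]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const unsigned char other[6] = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };

        printf("broadcast -> %d\n", classify_rx(bcast));   /* BRIDGE_AND_HOST */
        printf("assoc STA -> %d\n", classify_rx(sta));     /* BRIDGE_ONLY     */
        printf("unknown   -> %d\n", classify_rx(other));   /* HOST_ONLY       */
        return 0;
}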
index 3fa4d41..8ccd699 100644 (file)
@@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
        return ra_list;
 }
 
+/* This function returns a random number between 16 and 32 to be used as
+ * the threshold for the number of packets after which BA setup is initiated.
+ */
+static u8 mwifiex_get_random_ba_threshold(void)
+{
+       u32 sec, usec;
+       struct timeval ba_tstamp;
+       u8 ba_threshold;
+
+       /* Set up ba_packet_threshold here: a random number in the range
+        * [BA_SETUP_PACKET_OFFSET,
+        * BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]
+        */
+
+       do_gettimeofday(&ba_tstamp);
+       sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
+       usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
+       ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
+                                                     + BA_SETUP_PACKET_OFFSET;
+
+       return ba_threshold;
+}
+
 /*
  * This function allocates and adds a RA list for all TIDs
  * with the given RA.
@@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
        int i;
        struct mwifiex_ra_list_tbl *ra_list;
        struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, ra);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 
        for (i = 0; i < MAX_NUM_TID; ++i) {
                ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
                if (!ra_list)
                        break;
 
-               if (!mwifiex_queuing_ra_based(priv))
+               ra_list->is_11n_enabled = 0;
+               if (!mwifiex_queuing_ra_based(priv)) {
                        ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
-               else
-                       ra_list->is_11n_enabled = false;
+               } else {
+                       ra_list->is_11n_enabled =
+                                     mwifiex_is_sta_11n_enabled(priv, node);
+                       if (ra_list->is_11n_enabled)
+                               ra_list->max_amsdu = node->max_amsdu;
+               }
 
                dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
                        ra_list, ra_list->is_11n_enabled);
 
+               if (ra_list->is_11n_enabled) {
+                       ra_list->pkt_count = 0;
+                       ra_list->ba_packet_thr =
+                                             mwifiex_get_random_ba_threshold();
+               }
                list_add_tail(&ra_list->list,
                              &priv->wmm.tid_tbl_ptr[i].ra_list);
 
@@ -647,6 +686,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        skb_queue_tail(&ra_list->skb_head, skb);
 
        ra_list->total_pkts_size += skb->len;
+       ra_list->pkt_count++;
 
        atomic_inc(&priv->wmm.tx_pkts_queued);
 
@@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
 {
        int count = 0, total_size = 0;
        struct sk_buff *skb, *tmp;
+       int max_amsdu_size;
+
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
+           ptr->is_11n_enabled)
+               max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
+       else
+               max_amsdu_size = max_buf_size;
 
        skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
                total_size += skb->len;
-               if (total_size >= max_buf_size)
+               if (total_size >= max_amsdu_size)
                        break;
                if (++count >= MIN_NUM_AMSDU)
                        return true;
@@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
                skb_queue_tail(&ptr->skb_head, skb);
 
                ptr->total_pkts_size += skb->len;
+               ptr->pkt_count++;
                tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
@@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                /* ra_list_spinlock has been freed in
                   mwifiex_send_single_packet() */
        } else {
-               if (mwifiex_is_ampdu_allowed(priv, tid)) {
+               if (mwifiex_is_ampdu_allowed(priv, tid) &&
+                   ptr->pkt_count > ptr->ba_packet_thr) {
                        if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
                                mwifiex_create_ba_tbl(priv, ptr->ra, tid,
                                                      BA_SETUP_INPROGRESS);
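The wmm.c hunks above make A-MPDU setup conditional on per-RA-list traffic: mwifiex_get_random_ba_threshold() hashes the current time of day into a pseudo-random packet threshold, and mwifiex_dequeue_tx_packet() only creates a BA table entry once ptr->pkt_count exceeds ptr->ba_packet_thr. The standalone sketch below reproduces that derivation in user space with gettimeofday(); the two constants are assumed to be 16 each (their real definitions live in a driver header not shown in this diff), which would yield thresholds in the 16..31 range referred to as "between 16 and 32" in the comment.

/* Standalone reproduction of mwifiex_get_random_ba_threshold(); the two
 * constants below are assumed values, not taken from this diff. */
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define BA_SETUP_MAX_PACKET_THRESHOLD   16      /* assumed */
#define BA_SETUP_PACKET_OFFSET          16      /* assumed */

static uint8_t get_random_ba_threshold(void)
{
        struct timeval tv;
        uint32_t sec, usec;

        gettimeofday(&tv, NULL);

        /* Fold each timestamp field into 16 bits, combine, then map into
         * [OFFSET, OFFSET + MAX_THRESHOLD - 1], as the driver does. */
        sec  = (tv.tv_sec  & 0xFFFF) + (tv.tv_sec  >> 16);
        usec = (tv.tv_usec & 0xFFFF) + (tv.tv_usec >> 16);

        return ((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD
               + BA_SETUP_PACKET_OFFSET;
}

int main(void)
{
        /* Each RA list would get its own threshold when 11n is enabled. */
        printf("ba_packet_thr = %u\n", get_random_ba_threshold());
        return 0;
}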
index 224e03a..5099e53 100644 (file)
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
 }
 
 static void
-mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
+mwl8k_txq_xmit(struct ieee80211_hw *hw,
+              int index,
+              struct ieee80211_sta *sta,
+              struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        struct ieee80211_tx_info *tx_info;
        struct mwl8k_vif *mwl8k_vif;
-       struct ieee80211_sta *sta;
        struct ieee80211_hdr *wh;
        struct mwl8k_tx_queue *txq;
        struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
        tx_info = IEEE80211_SKB_CB(skb);
-       sta = tx_info->control.sta;
        mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
 
        if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        tx->pkt_phys_addr = cpu_to_le32(dma);
        tx->pkt_len = cpu_to_le16(skb->len);
        tx->rate_info = 0;
-       if (!priv->ap_fw && tx_info->control.sta != NULL)
-               tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+       if (!priv->ap_fw && sta != NULL)
+               tx->peer_id = MWL8K_STA(sta)->peer_id;
        else
                tx->peer_id = 0;
 
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
 /*
  * Core driver operations.
  */
-static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                return;
        }
 
-       mwl8k_txq_xmit(hw, index, skb);
+       mwl8k_txq_xmit(hw, index, control->sta, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
index 1403709..1ef1bfe 100644 (file)
@@ -76,6 +76,7 @@ struct p54_channel_entry {
        u16 freq;
        u16 data;
        int index;
+       int max_power;
        enum ieee80211_band band;
 };
 
@@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
        for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
                           (i < list->entries); i++) {
                struct p54_channel_entry *chan = &list->channels[i];
+               struct ieee80211_channel *dest = &tmp->channels[j];
 
                if (chan->band != band)
                        continue;
@@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
                        continue;
                }
 
-               tmp->channels[j].band = chan->band;
-               tmp->channels[j].center_freq = chan->freq;
+               dest->band = chan->band;
+               dest->center_freq = chan->freq;
+               dest->max_power = chan->max_power;
                priv->survey[*chan_num].channel = &tmp->channels[j];
                priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
                        SURVEY_INFO_CHANNEL_TIME |
                        SURVEY_INFO_CHANNEL_TIME_BUSY |
                        SURVEY_INFO_CHANNEL_TIME_TX;
-               tmp->channels[j].hw_value = (*chan_num);
+               dest->hw_value = (*chan_num);
                j++;
                (*chan_num)++;
        }
@@ -229,10 +232,11 @@ err_out:
        return ret;
 }
 
-static void p54_update_channel_param(struct p54_channel_list *list,
-                                    u16 freq, u16 data)
+static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
+                                                         u16 freq, u16 data)
 {
-       int band, i;
+       int i;
+       struct p54_channel_entry *entry = NULL;
 
        /*
         * usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list,
         */
        for (i = list->entries; i >= 0; i--) {
                if (freq == list->channels[i].freq) {
-                       list->channels[i].data |= data;
+                       entry = &list->channels[i];
                        break;
                }
        }
 
        if ((i < 0) && (list->entries < list->max_entries)) {
                /* entry does not exist yet. Initialize a new one. */
-               band = p54_get_band_from_freq(freq);
+               int band = p54_get_band_from_freq(freq);
 
                /*
                 * filter out frequencies which don't belong into
                 * any supported band.
                 */
-               if (band < 0)
-                       return ;
+               if (band >= 0) {
+                       i = list->entries++;
+                       list->band_channel_num[band]++;
+
+                       entry = &list->channels[i];
+                       entry->freq = freq;
+                       entry->band = band;
+                       entry->index = ieee80211_frequency_to_channel(freq);
+                       entry->max_power = 0;
+                       entry->data = 0;
+               }
+       }
 
-               i = list->entries++;
-               list->band_channel_num[band]++;
+       if (entry)
+               entry->data |= data;
 
-               list->channels[i].freq = freq;
-               list->channels[i].data = data;
-               list->channels[i].band = band;
-               list->channels[i].index = ieee80211_frequency_to_channel(freq);
-               /* TODO: parse output_limit and fill max_power */
+       return entry;
+}
+
+static int p54_get_maxpower(struct p54_common *priv, void *data)
+{
+       switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
+       case PDR_SYNTH_FRONTEND_LONGBOW: {
+               struct pda_channel_output_limit_longbow *pda = data;
+               int j;
+               u16 rawpower = 0;
+               pda = data;
+               for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
+                       struct pda_channel_output_limit_point_longbow *point =
+                               &pda->point[j];
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_qpsk));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_bpsk));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_16qam));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_64qam));
+               }
+               /* longbow seems to use 1/16 dBm units */
+               return rawpower / 16;
+               }
+
+       case PDR_SYNTH_FRONTEND_DUETTE3:
+       case PDR_SYNTH_FRONTEND_DUETTE2:
+       case PDR_SYNTH_FRONTEND_FRISBEE:
+       case PDR_SYNTH_FRONTEND_XBOW: {
+               struct pda_channel_output_limit *pda = data;
+               u8 rawpower = 0;
+               rawpower = max(rawpower, pda->val_qpsk);
+               rawpower = max(rawpower, pda->val_bpsk);
+               rawpower = max(rawpower, pda->val_16qam);
+               rawpower = max(rawpower, pda->val_64qam);
+               /* raw values are in 1/4 dBm units */
+               return rawpower / 4;
+               }
+
+       default:
+               return 20;
        }
 }
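
The new p54_get_maxpower() above folds the per-modulation output limits into one dBm value: Longbow frontends take the maximum of the 16-bit per-point limits, stored in 1/16 dBm steps, while the Duette/Frisbee/Xbow frontends use 8-bit limits in 1/4 dBm steps; unknown frontends fall back to a conservative 20 dBm. As a worked example of the scaling (sample numbers, not EEPROM data): a Longbow reading of 320 gives 320 / 16 = 20 dBm, and a Duette reading of 80 gives 80 / 4 = 20 dBm. A standalone sketch of the same reduction:

    /* Illustrative only: mirrors the max-then-scale step of the patch
     * using made-up per-modulation limits in 1/4 dBm units. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int qpsk = 72, bpsk = 80, qam16 = 68, qam64 = 60;
            unsigned int raw = qpsk;

            if (bpsk > raw)  raw = bpsk;
            if (qam16 > raw) raw = qam16;
            if (qam64 > raw) raw = qam64;

            printf("max_power = %u dBm\n", raw / 4); /* 80 / 4 = 20 dBm */
            return 0;
    }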
 
@@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
                }
 
                if (i < priv->output_limit->entries) {
-                       freq = le16_to_cpup((__le16 *) (i *
-                                           priv->output_limit->entry_size +
-                                           priv->output_limit->offset +
-                                           priv->output_limit->data));
-
-                       p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
+                       struct p54_channel_entry *tmp;
+
+                       void *data = (void *) ((unsigned long) i *
+                               priv->output_limit->entry_size +
+                               priv->output_limit->offset +
+                               priv->output_limit->data);
+
+                       freq = le16_to_cpup((__le16 *) data);
+                       tmp = p54_update_channel_param(list, freq,
+                                                      CHAN_HAS_LIMIT);
+                       if (tmp) {
+                               tmp->max_power = p54_get_maxpower(priv, data);
+                       }
                }
 
                if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@ good_eeprom:
                goto err;
        }
 
+       priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
+
        err = p54_generate_channel_lists(dev);
        if (err)
                goto err;
 
-       priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
        if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
                p54_init_xbow_synth(priv);
        if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
index afde72b..20ebe39 100644 (file)
@@ -57,6 +57,18 @@ struct pda_channel_output_limit {
        u8 rate_set_size;
 } __packed;
 
+struct pda_channel_output_limit_point_longbow {
+       __le16 val_bpsk;
+       __le16 val_qpsk;
+       __le16 val_16qam;
+       __le16 val_64qam;
+} __packed;
+
+struct pda_channel_output_limit_longbow {
+       __le16 freq;
+       struct pda_channel_output_limit_point_longbow point[3];
+} __packed;
+
 struct pda_pa_curve_data_sample_rev0 {
        u8 rf_power;
        u8 pa_detector;
index 3d8d622..de1d46b 100644 (file)
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
index 7cffea7..5e91ad0 100644 (file)
@@ -158,7 +158,7 @@ static int p54_beacon_update(struct p54_common *priv,
         * to cancel the old beacon template by hand, instead the firmware
         * will release the previous one through the feedback mechanism.
         */
-       p54_tx_80211(priv->hw, beacon);
+       p54_tx_80211(priv->hw, NULL, beacon);
        priv->tsf_high32 = 0;
        priv->tsf_low32 = 0;
 
index 89318ad..b439079 100644 (file)
@@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev)
        return 0;
 }
 
+static void p54p_firmware_step2(const struct firmware *fw,
+                               void *context)
+{
+       struct p54p_priv *priv = context;
+       struct ieee80211_hw *dev = priv->common.hw;
+       struct pci_dev *pdev = priv->pdev;
+       int err;
+
+       if (!fw) {
+               dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
+               err = -ENOENT;
+               goto out;
+       }
+
+       priv->firmware = fw;
+
+       err = p54p_open(dev);
+       if (err)
+               goto out;
+       err = p54_read_eeprom(dev);
+       p54p_stop(dev);
+       if (err)
+               goto out;
+
+       err = p54_register_common(dev, &pdev->dev);
+       if (err)
+               goto out;
+
+out:
+
+       complete(&priv->fw_loaded);
+
+       if (err) {
+               struct device *parent = pdev->dev.parent;
+
+               if (parent)
+                       device_lock(parent);
+
+               /*
+                * This will indirectly result in a call to p54p_remove.
+                * Hence, we don't need to bother with freeing any
+                * allocated resources at all.
+                */
+               device_release_driver(&pdev->dev);
+
+               if (parent)
+                       device_unlock(parent);
+       }
+
+       pci_dev_put(pdev);
+}
+
 static int __devinit p54p_probe(struct pci_dev *pdev,
                                const struct pci_device_id *id)
 {
@@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        unsigned long mem_addr, mem_len;
        int err;
 
+       pci_dev_get(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        priv = dev->priv;
        priv->pdev = pdev;
 
+       init_completion(&priv->fw_loaded);
        SET_IEEE80211_DEV(dev, &pdev->dev);
        pci_set_drvdata(pdev, dev);
 
@@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        spin_lock_init(&priv->lock);
        tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
 
-       err = request_firmware(&priv->firmware, "isl3886pci",
-                              &priv->pdev->dev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
-               err = request_firmware(&priv->firmware, "isl3886",
-                                      &priv->pdev->dev);
-               if (err)
-                       goto err_free_common;
-       }
-
-       err = p54p_open(dev);
-       if (err)
-               goto err_free_common;
-       err = p54_read_eeprom(dev);
-       p54p_stop(dev);
-       if (err)
-               goto err_free_common;
-
-       err = p54_register_common(dev, &pdev->dev);
-       if (err)
-               goto err_free_common;
-
-       return 0;
+       err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
+                                     &priv->pdev->dev, GFP_KERNEL,
+                                     priv, p54p_firmware_step2);
+       if (!err)
+               return 0;
 
- err_free_common:
-       release_firmware(priv->firmware);
        pci_free_consistent(pdev, sizeof(*priv->ring_control),
                            priv->ring_control, priv->ring_control_dma);
 
@@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        pci_release_regions(pdev);
  err_disable_dev:
        pci_disable_device(pdev);
+       pci_dev_put(pdev);
        return err;
 }
 
@@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
        if (!dev)
                return;
 
-       p54_unregister_common(dev);
        priv = dev->priv;
+       wait_for_completion(&priv->fw_loaded);
+       p54_unregister_common(dev);
        release_firmware(priv->firmware);
        pci_free_consistent(pdev, sizeof(*priv->ring_control),
                            priv->ring_control, priv->ring_control_dma);
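
The p54pci hunks above switch the driver from a blocking request_firmware() call in probe to request_firmware_nowait(): p54p_probe() now only sets up PCI resources and schedules p54p_firmware_step2(), which opens the device, reads the EEPROM and registers with mac80211 once the firmware arrives, or unbinds the device through device_release_driver() if anything fails. The new fw_loaded completion keeps p54p_remove() from tearing the device down while that callback is still running, and the pci_dev_get()/pci_dev_put() pair keeps the PCI device alive across the asynchronous window. A minimal sketch of the same pattern for a hypothetical driver (mydrv_* names and helpers are illustrative, error handling trimmed):

    /* Sketch only, not part of this patch. */
    static void mydrv_fw_cb(const struct firmware *fw, void *context)
    {
            struct mydrv_priv *priv = context;

            if (fw)
                    mydrv_finish_init(priv, fw);    /* register with the stack */
            complete(&priv->fw_loaded);             /* unblock remove() */
    }

    static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            struct mydrv_priv *priv = mydrv_alloc(pdev);

            init_completion(&priv->fw_loaded);
            return request_firmware_nowait(THIS_MODULE, 1, "mydrv.fw",
                                           &pdev->dev, GFP_KERNEL, priv,
                                           mydrv_fw_cb);
    }

    static void mydrv_remove(struct pci_dev *pdev)
    {
            struct mydrv_priv *priv = pci_get_drvdata(pdev);

            wait_for_completion(&priv->fw_loaded);  /* callback finished */
            mydrv_teardown(priv);
    }
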
index 7aa509f..68405c1 100644 (file)
@@ -105,6 +105,7 @@ struct p54p_priv {
        struct sk_buff *tx_buf_data[32];
        struct sk_buff *tx_buf_mgmt[4];
        struct completion boot_comp;
+       struct completion fw_loaded;
 };
 
 #endif /* P54PCI_H */
index f38786e..5861e13 100644 (file)
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(p54_rx);
 
 static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
-                               struct ieee80211_tx_info *info, u8 *queue,
-                               u32 *extra_len, u16 *flags, u16 *aid,
+                               struct ieee80211_tx_info *info,
+                               struct ieee80211_sta *sta,
+                               u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
                                bool *burst_possible)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
                        }
                }
 
-               if (info->control.sta)
-                       *aid = info->control.sta->aid;
+               if (sta)
+                       *aid = sta->aid;
                break;
        }
 }
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
        }
 }
 
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb)
 {
        struct p54_common *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
        u8 nrates = 0, nremaining = 8;
        bool burst_allowed = false;
 
-       p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
+       p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
                            &hdr_flags, &aid, &burst_allowed);
 
        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
index 8afb546..f991e8b 100644 (file)
@@ -1287,7 +1287,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
 /*
  * mac80211 handlers.
  */
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb);
 int rt2x00mac_start(struct ieee80211_hw *hw);
 void rt2x00mac_stop(struct ieee80211_hw *hw);
 int rt2x00mac_add_interface(struct ieee80211_hw *hw,
index a6b88bd..a59048f 100644 (file)
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
         */
        skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
        while (skb) {
-               rt2x00mac_tx(rt2x00dev->hw, skb);
+               rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
                skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
        }
 }
index 4ff26c2..c3d0f2f 100644 (file)
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
index f7e74a0..e488b94 100644 (file)
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
 static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                                                struct sk_buff *skb,
                                                struct txentry_desc *txdesc,
+                                               struct ieee80211_sta *sta,
                                                const struct rt2x00_rate *hwrate)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_sta *sta_priv = NULL;
 
-       if (tx_info->control.sta) {
+       if (sta) {
                txdesc->u.ht.mpdu_density =
-                   tx_info->control.sta->ht_cap.ampdu_density;
+                   sta->ht_cap.ampdu_density;
 
-               sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
+               sta_priv = sta_to_rt2x00_sta(sta);
                txdesc->u.ht.wcid = sta_priv->wcid;
        }
 
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                 * MIMO PS should be set to 1 for STA's using dynamic SM PS
                 * when using more then one tx stream (>MCS7).
                 */
-               if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
-                   ((tx_info->control.sta->ht_cap.cap &
+               if (sta && txdesc->u.ht.mcs > 7 &&
+                   ((sta->ht_cap.cap &
                      IEEE80211_HT_CAP_SM_PS) >>
                     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
                    WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 
 static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
                                             struct sk_buff *skb,
-                                            struct txentry_desc *txdesc)
+                                            struct txentry_desc *txdesc,
+                                            struct ieee80211_sta *sta)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 
        if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
                rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
-                                                   hwrate);
+                                                  sta, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
                                                      hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
         * after that we are free to use the skb->cb array
         * for our information.
         */
-       rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
+       rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
 
        /*
         * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
         * after that we are free to use the skb->cb array
         * for our information.
         */
-       rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
+       rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 
        /*
         * Fill in skb descriptor
index aceaf68..021d83e 100644 (file)
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       rtl8180_tx(dev, skb);
+       rtl8180_tx(dev, NULL, skb);
 
 resched:
        /*
index 5330240..7811b63 100644 (file)
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
        }
 }
 
-static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct rtl8187_priv *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       rtl8187_tx(dev, skb);
+       rtl8187_tx(dev, NULL, skb);
 
 resched:
        /*
index 942e56b..59381fe 100644 (file)
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
                rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
 
                info->control.rates[0].idx = 0;
-               info->control.sta = sta;
                info->band = hw->conf.channel->band;
-               rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+               rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
        }
 err_free:
        return 0;
index a18ad2a..a7c0e52 100644 (file)
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_unlock(&rtlpriv->locks.conf_mutex);
 }
 
-static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
                goto err_free;
 
-       if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
-               rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+       if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+               rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
 
        return;
 
index 80f75d3..aad9d44 100644 (file)
@@ -504,7 +504,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
                                _rtl_update_earlymode_info(hw, skb,
                                                           &tcb_desc, tid);
 
-                       rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+                       rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
                }
        }
 }
@@ -929,7 +929,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        info = IEEE80211_SKB_CB(pskb);
        pdesc = &ring->desc[0];
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
-               info, pskb, BEACON_QUEUE, &tcb_desc);
+               info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
 
        __skb_queue_tail(&ring->queue, pskb);
 
@@ -1305,11 +1305,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
 }
 
 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct rtl_sta_info *sta_entry = NULL;
        u8 tid = rtl_get_tid(skb);
 
@@ -1337,13 +1336,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
        return true;
 }
 
-static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-               struct rtl_tcb_desc *ptcb_desc)
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_sta *sta,
+                     struct sk_buff *skb,
+                     struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_sta_info *sta_entry = NULL;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct rtl8192_tx_ring *ring;
        struct rtl_tx_desc *pdesc;
        u8 idx;
@@ -1418,7 +1418,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
-                       info, skb, hw_queue, ptcb_desc);
+                       info, sta, skb, hw_queue, ptcb_desc);
 
        __skb_queue_tail(&ring->queue, skb);
 
index 5216664..390d6d4 100644 (file)
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
 
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       struct ieee80211_sta *sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index c4adb97..a7cdd51 100644 (file)
@@ -713,6 +713,7 @@ struct rx_desc_92c {
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
index 2e6eb35..27863d7 100644 (file)
@@ -496,7 +496,9 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 queue_index,
                          struct rtl_tcb_desc *tcb_desc)
 {
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *qc = ieee80211_get_qos_ctl(hdr);
        u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        u16 seq_number;
index 332b06e..725c53a 100644 (file)
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
                                           struct sk_buff_head *);
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 queue_index,
                          struct rtl_tcb_desc *tcb_desc);
 void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
index f80690d..4686f34 100644 (file)
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index 057a524..c1b5dfb 100644 (file)
@@ -730,6 +730,7 @@ struct rx_desc_92d {
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
index 36d1cb3..28c53fb 100644 (file)
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
                struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-               struct ieee80211_tx_info *info, struct sk_buff *skb,
+               struct ieee80211_tx_info *info,
+               struct ieee80211_sta *sta,
+               struct sk_buff *skb,
                u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index 011e7b0..64dd66f 100644 (file)
@@ -31,6 +31,7 @@
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
index aa970fc..9140469 100644 (file)
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        _rtl_submit_tx_urb(hw, _urb);
 }
 
-static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
-                           u16 hw_queue)
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  struct sk_buff *skb,
+                                  u16 hw_queue)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
                seq_number += 1;
                seq_number <<= 4;
        }
-       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
                                        hw_queue, &tcb_desc);
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 }
 
-static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+static int rtl_usb_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_sta *sta,
+                     struct sk_buff *skb,
                      struct rtl_tcb_desc *dummy)
 {
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (unlikely(is_hal_stop(rtlhal)))
                goto err_free;
        hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
-       _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+       _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
        _rtl_usb_transmit(hw, skb, hw_queue);
        return NETDEV_TX_OK;
 
@@ -923,6 +927,7 @@ err_free:
 }
 
 static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
        return false;
index cdaa21f..40153e7 100644 (file)
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
        EEPROM_BOOT_EFUSE,
 };
 
-enum ttl_status {
+enum rtl_status {
        RTL_STATUS_INTERFACE_START = 0,
 };
 
@@ -1418,6 +1418,7 @@ struct rtl_hal_ops {
        void (*fill_tx_desc) (struct ieee80211_hw *hw,
                              struct ieee80211_hdr *hdr, u8 *pdesc_tx,
                              struct ieee80211_tx_info *info,
+                             struct ieee80211_sta *sta,
                              struct sk_buff *skb, u8 hw_queue,
                              struct rtl_tcb_desc *ptcb_desc);
        void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1475,11 +1476,15 @@ struct rtl_intf_ops {
        int (*adapter_start) (struct ieee80211_hw *hw);
        void (*adapter_stop) (struct ieee80211_hw *hw);
 
-       int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb,
-                       struct rtl_tcb_desc *ptcb_desc);
+       int (*adapter_tx) (struct ieee80211_hw *hw,
+                          struct ieee80211_sta *sta,
+                          struct sk_buff *skb,
+                          struct rtl_tcb_desc *ptcb_desc);
        void (*flush)(struct ieee80211_hw *hw, bool drop);
        int (*reset_trx_ring) (struct ieee80211_hw *hw);
-       bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+       bool (*waitq_insert) (struct ieee80211_hw *hw,
+                             struct ieee80211_sta *sta,
+                             struct sk_buff *skb);
 
        /*pci */
        void (*disable_aspm) (struct ieee80211_hw *hw);
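
In rtlwifi the station pointer now travels explicitly through the interface ops declared above: rtl_op_tx() hands control->sta to waitq_insert() and adapter_tx(), which pass it on to each chip's fill_tx_desc(), instead of every layer re-reading IEEE80211_SKB_CB(skb)->control.sta. Internally generated traffic (the beacon tasklet, the early-mode wait-queue flush) passes NULL, so the descriptor fillers keep the usual guard before touching per-station state, roughly:

    /* Sketch of the guard the explicit (possibly NULL) sta argument needs. */
    struct rtl_sta_info *sta_entry = NULL;

    if (sta)
            sta_entry = (struct rtl_sta_info *)sta->drv_priv;
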
index 3118c42..441cbcc 100644 (file)
@@ -354,7 +354,9 @@ out:
        return ret;
 }
 
-static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct wl1251 *wl = hw->priv;
        unsigned long flags;
index 7254860..ff830cf 100644 (file)
@@ -1181,7 +1181,9 @@ out:
        return ret;
 }
 
-static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1199,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        mapping = skb_get_queue_mapping(skb);
        q = wl1271_tx_get_queue(mapping);
 
-       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
 
        spin_lock_irqsave(&wl->wl_lock, flags);
 
index f0081f7..1a2f31c 100644 (file)
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(wl12xx_is_dummy_packet);
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                        struct sk_buff *skb)
+static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-       struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
-
-       if (control->control.sta) {
+       if (sta) {
                struct wl1271_station *wl_sta;
 
-               wl_sta = (struct wl1271_station *)
-                               control->control.sta->drv_priv;
+               wl_sta = (struct wl1271_station *)sta->drv_priv;
                return wl_sta->hlid;
        } else {
                struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 }
 
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                     struct sk_buff *skb)
+                     struct sk_buff *skb, struct ieee80211_sta *sta)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                return wl->system_hlid;
 
        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
 
        if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
             test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -344,13 +341,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
 /* caller must hold wl->mutex */
 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                  struct sk_buff *skb, u32 buf_offset)
+                                  struct sk_buff *skb, u32 buf_offset, u8 hlid)
 {
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
        u32 total_len;
-       u8 hlid;
        bool is_dummy;
        bool is_gem = false;
 
@@ -359,9 +355,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                return -EINVAL;
        }
 
+       if (hlid == WL12XX_INVALID_LINK_ID) {
+               wl1271_error("invalid hlid. dropping skb 0x%p", skb);
+               return -EINVAL;
+       }
+
        info = IEEE80211_SKB_CB(skb);
 
-       /* TODO: handle dummy packets on multi-vifs */
        is_dummy = wl12xx_is_dummy_packet(wl, skb);
 
        if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +386,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
                is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
        }
-       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
-       if (hlid == WL12XX_INVALID_LINK_ID) {
-               wl1271_error("invalid hlid. dropping skb 0x%p", skb);
-               return -EINVAL;
-       }
 
        ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
                                 is_gem);
@@ -517,7 +512,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
 }
 
 static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
-                                             struct wl12xx_vif *wlvif)
+                                             struct wl12xx_vif *wlvif,
+                                             u8 *hlid)
 {
        struct sk_buff *skb = NULL;
        int i, h, start_hlid;
@@ -544,10 +540,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
        if (!skb)
                wlvif->last_tx_hlid = 0;
 
+       *hlid = wlvif->last_tx_hlid;
        return skb;
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
 {
        unsigned long flags;
        struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +553,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        /* continue from last wlvif (round robin) */
        if (wlvif) {
                wl12xx_for_each_wlvif_continue(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
@@ -565,13 +562,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        }
 
        /* dequeue from the system HLID before the restarting wlvif list */
-       if (!skb)
+       if (!skb) {
                skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+               *hlid = wl->system_hlid;
+       }
 
        /* do a new pass over the wlvif list */
        if (!skb) {
                wl12xx_for_each_wlvif(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
@@ -591,6 +590,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
                int q;
 
                skb = wl->dummy_packet;
+               *hlid = wl->system_hlid;
                q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
                WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +602,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 }
 
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                 struct sk_buff *skb)
+                                 struct sk_buff *skb, u8 hlid)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +610,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        if (wl12xx_is_dummy_packet(wl, skb)) {
                set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
        } else {
-               u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                /* make sure we dequeue the same packet next time */
@@ -686,26 +685,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
        unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        int ret = 0;
        int bus_ret = 0;
+       u8 hlid;
 
        if (unlikely(wl->state == WL1271_STATE_OFF))
                return 0;
 
-       while ((skb = wl1271_skb_dequeue(wl))) {
+       while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                bool has_data = false;
 
                wlvif = NULL;
                if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
                        wlvif = wl12xx_vif_to_data(info->control.vif);
+               else
+                       hlid = wl->system_hlid;
 
                has_data = wlvif && wl1271_tx_is_data_present(skb);
-               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
+               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
+                                             hlid);
                if (ret == -EAGAIN) {
                        /*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
-                       wl1271_skb_queue_head(wl, wlvif, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 
                        buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
                                                            last_len);
@@ -722,7 +725,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
-                       wl1271_skb_queue_head(wl, wlvif, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        /* No work left, avoid scheduling redundant tx work */
                        set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
@@ -732,7 +735,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                                 * fw still expects dummy packet,
                                 * so re-enqueue it
                                 */
-                               wl1271_skb_queue_head(wl, wlvif, skb);
+                               wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        else
                                ieee80211_free_txskb(wl->hw, skb);
                        goto out_ack;
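
In wlcore the firmware link id (hlid) used to be re-derived from the skb inside wl1271_prepare_tx_frame() and wl1271_skb_queue_head(); since the station pointer can no longer be recovered from the skb's control block at that point, the hlid is now chosen once, either in wl1271_op_tx() via wl12xx_tx_get_hlid(..., control->sta) or by the dequeue helpers, which report the link they dequeued from (falling back to wl->system_hlid), and is passed along explicitly. Requeueing on a full aggregation or firmware buffer therefore reuses the known hlid instead of recomputing it, roughly:

    /* Sketch of the out-parameter flow the patch adopts (illustrative
     * fragment mirroring wlcore_tx_work_locked above). */
    struct sk_buff *skb;
    u8 hlid;
    int ret;

    while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
            ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset, hlid);
            if (ret == -EAGAIN) {
                    /* put it back on the same link it came from */
                    wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                    break;
            }
    }
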
index 1e939b0..349520d 100644 (file)
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
                                enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                        struct sk_buff *skb);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                     struct sk_buff *skb);
+                     struct sk_buff *skb, struct ieee80211_sta *sta);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
index c9e2660..4598801 100644 (file)
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
                skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
                if (!skb)
                        break;
-               zd_op_tx(mac->hw, skb);
+               zd_op_tx(mac->hw, NULL, skb);
        }
 
        /*
index 650f79a..c934fe8 100644 (file)
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateConnected:
-               netif_notify_peers(netdev);
+               netdev_notify_peers(netdev);
                break;
 
        case XenbusStateClosing:
index 7e2ddc0..c625086 100644 (file)
@@ -190,16 +190,30 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
 {
        struct ssb_bus *bus = mcore->dev->bus;
 
-       mcore->flash_buswidth = 2;
-       if (bus->chipco.dev) {
-               mcore->flash_window = 0x1c000000;
-               mcore->flash_window_size = 0x02000000;
+       /* When there is no chipcommon on the bus there is 4MB flash */
+       if (!bus->chipco.dev) {
+               mcore->flash_buswidth = 2;
+               mcore->flash_window = SSB_FLASH1;
+               mcore->flash_window_size = SSB_FLASH1_SZ;
+               return;
+       }
+
+       /* There is ChipCommon, so use it to read info about flash */
+       switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
+       case SSB_CHIPCO_FLASHT_STSER:
+       case SSB_CHIPCO_FLASHT_ATSER:
+               pr_err("Serial flash not supported\n");
+               break;
+       case SSB_CHIPCO_FLASHT_PARA:
+               pr_debug("Found parallel flash\n");
+               mcore->flash_window = SSB_FLASH2;
+               mcore->flash_window_size = SSB_FLASH2_SZ;
                if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
                               & SSB_CHIPCO_CFG_DS16) == 0)
                        mcore->flash_buswidth = 1;
-       } else {
-               mcore->flash_window = 0x1fc00000;
-               mcore->flash_window_size = 0x00400000;
+               else
+                       mcore->flash_buswidth = 2;
+               break;
        }
 }
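
The ssb change above stops hard-coding a parallel-flash window whenever a ChipCommon core is present: the flash type is now taken from the ChipCommon capability field (SSB_CHIPCO_CAP_FLASHT), serial flash is reported as unsupported for the time being, parallel flash moves to the SSB_FLASH2 window with the bus width still read from SSB_CHIPCO_FLASH_CFG, and only buses without ChipCommon keep the fixed SSB_FLASH1 4 MB mapping. A compact sketch of the new decision order (the use_window() helper is hypothetical):

    /* Sketch only: summarizes the detection order introduced by the patch. */
    if (!bus->chipco.dev) {
            use_window(SSB_FLASH1, SSB_FLASH1_SZ, 2);       /* legacy 4 MB */
    } else {
            switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
            case SSB_CHIPCO_FLASHT_STSER:
            case SSB_CHIPCO_FLASHT_ATSER:
                    /* serial flash detected, but not supported yet */
                    break;
            case SSB_CHIPCO_FLASHT_PARA:
                    use_window(SSB_FLASH2, SSB_FLASH2_SZ, parallel_bus_width());
                    break;
            }
    }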
 
index 0ca857a..48aa136 100644 (file)
@@ -119,7 +119,9 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
        *total_flags = new_flags;
 }
 
-static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct wbsoft_priv *priv = dev->priv;
 
index 344713b..76628e3 100644 (file)
@@ -43,7 +43,6 @@ fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
 fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
 fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
                                   cxgb3/t3c_psram-1.1.0.bin \
-                                  cxgb3/t3fw-7.10.0.bin \
                                   cxgb3/ael2005_opt_edc.bin \
                                   cxgb3/ael2005_twx_edc.bin \
                                   cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
deleted file mode 100644 (file)
index 96399d8..0000000
+++ /dev/null
@@ -1,1935 +0,0 @@
-:1000000060007400200380002003700000001000D6
-:1000100000002000E100028400070000E1000288E7
-:1000200000010000E0000000E00000A0010000006E
-:1000300044444440E3000183200200002001E0002A
-:100040002001FF101FFFD0001FFFC000E300043C91
-:100050000200000020006C841FFFC2A020006CCCB6
-:100060001FFFC2A420006D0C1FFFC2A820006D80DE
-:100070001FFFC2AC200003C0C00000E43100EA3121
-:1000800000A13100A03103020002ED306E2A05000C
-:10009000ED3100020002160012FFDBC03014FFDA5F
-:1000A000D30FD30FD30F03431F244C107249F0D347
-:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
-:1000C000D30FD30F03431F244C107249F0D30FD327
-:1000D0000FD30F14FFCE03421F14FFCB03421F1296
-:1000E000FFCCC0302D37302D37342D37382D373CED
-:1000F000233D017233ED00020012FFC4C0302F37E0
-:10010000002F37102F37202F3730233D017233ED6A
-:1001100000020012FFBEC0302737002737102737F4
-:1001200020273730233D017233ED03020012FFB95F
-:1001300013FFBA0C0200932012FFB913FFB90C028F
-:1001400000932012FFB8C0319320822012FFB71312
-:10015000FFB7932012FFB715FFB316FFB6C030D715
-:100160002005660160001B00000000000000000088
-:10017000043605000200D30FD30F05330C6E3B1479
-:100180000747140704437631E604360505330C6F40
-:100190003BED00020012FFA615FFA3230A00D720A3
-:1001A000070443043E0505330C0747146F3BF00377
-:1001B000020012FFA1C03014FFA1D30FD30FD30F41
-:1001C0009340B4447249F2D30FD30FD30F14FF9B63
-:1001D000834014FF9B834012FF9B230A0014FF9A65
-:1001E000D30FD30FD30F9340B4447249F2D30FD33C
-:1001F0000FD30F14FF95834012FF95C92F832084DE
-:10020000218522BC22743B0F8650B4559630B433FE
-:100210007433F463FFE60000653FE1655FDE12FFC3
-:100220007C230A0028374028374428374828374C91
-:10023000233D017233ED03020000020012FF7AC079
-:1002400032032E0503020012FF7813FF819320C0B2
-:1002500011014931004831010200C00014FF7E0441
-:10026000D23115FF7D945014FF7D04D33115FF7CEE
-:10027000945014FF7C04D43115FF7C24560014FFE5
-:100280007B04D53115FF7B24560010FF7A03000054
-:10029000000000000000000000000000000000005E
-:1002A000000000000000000000000000000000004E
-:1002B000000000000000000000000000000000003E
-:1002C000000000000000000000000000000000002E
-:1002D000000000000000000000000000000000001E
-:1002E000000000000000000000000000000000000E
-:1002F00000000000000000000000000000000000FE
-:1003000000000000000000000000000000000000ED
-:1003100000000000000000000000000000000000DD
-:1003200000000000000000000000000000000000CD
-:1003300000000000000000000000000000000000BD
-:1003400000000000000000000000000000000000AD
-:10035000000000000000000000000000000000009D
-:10036000000000000000000000000000000000008D
-:10037000000000000000000000000000000000007D
-:10038000000000000000000000000000000000006D
-:10039000000000000000000000000000000000005D
-:1003A000000000000000000000000000000000004D
-:1003B000000000000000000000000000000000003D
-:1003C000000000000000000000000000000000002D
-:1003D000000000000000000000000000000000001D
-:1003E000000000000000000000000000000000000D
-:1003F00000000000000000000000000000000000FD
-:1004000000000000000000000000000000000000EC
-:1004100000000000000000000000000000000000DC
-:1004200063FFFC000000000000000000000000006E
-:100430000000000000000000000000001FFC0000A1
-:100440001FFC0000E30005C81FFC00001FFC0000AB
-:10045000E30005C81FFC00001FFC0000E30005C806
-:100460001FFFC0001FFFC000E30005C81FFFC00042
-:100470001FFFC018E30005C81FFFC0181FFFC018EA
-:10048000E30005E01FFFC0181FFFC294E30005E072
-:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
-:1004A0001FFFC59CE300085C200000002000016ADB
-:1004B000E3000B582000018020000180E3000CC401
-:1004C0002000020020000203E3000CC42000021CF4
-:1004D00020000220E3000CC8200002202000022699
-:1004E000E3000CCC2000023C20000240E3000CD4CE
-:1004F0002000024020000249E3000CD82000024CFA
-:1005000020000250E3000CE42000025020000259B9
-:10051000E3000CE82000025C20000260E3000CF421
-:100520002000026020000269E3000CF82000026C49
-:1005300020000270E3000D04200002702000027908
-:10054000E3000D082000028C2000028CE3000D1453
-:100550002000029020000293E3000D14200002AC62
-:10056000200002B0E3000D18200002D0200002F2AB
-:10057000E3000D1C200003B0200003B0E3000D4099
-:10058000200003B0200003B0E3000D40200003B0C2
-:10059000200003B0E3000D40200003B0200003B0B2
-:1005A000E3000D40200003B020006EA4E3000D40E6
-:1005B00020006EA420006EA4E30078340000000048
-:1005C00000000000000000001FFC00001FFC0000F5
-:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
-:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
-:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
-:10060000003FFFFF8040000010000000080FFFFFC8
-:100610001FFFC27D000FFFFF804FFFFF8000000023
-:1006200000000880B000000560500000600000007D
-:1006300040000011350000004100000010000001E2
-:100640002000000000001000400000000500000035
-:10065000800000190400000000000800E100020012
-:1006600010000005806000007000000020000009FC
-:10067000001FF8008000001EA0000000F80000002D
-:1006800007FFFFFF080000001800000001008001C4
-:10069000420000001FFFC22D1FFFC0EC00010080C0
-:1006A000604000001A0000000C0000001000000A6A
-:1006B000000030000001000080000018FC00000075
-:1006C0008000000100004000600008008000001C65
-:1006D0008000001A030000008000040004030403EB
-:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
-:1006F000FFFFF000000016D00000FFF7A50000008B
-:100700001FFFC4C01FFFC4710001000800000B20C0
-:10071000202FFF801FFFC46500002C00FFFEFFF8A4
-:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
-:100730000000FFEF010011001FFFC3E21FFFC5A073
-:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
-:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
-:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
-:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
-:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
-:10079000000100817FFFFFFFE1000600000027103D
-:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
-:1007B0000003D0901FFFC5742B5063802B507980AD
-:1007C0002B5090802B50A6801FFFC4790100110F81
-:1007D000202FFE0020300080202FFF000000FFFFB0
-:1007E0000001FFF82B50B2002B50B208000100109E
-:1007F0002B50B1802B50B2802B50BA000001001159
-:100800002B50BD282B50BC802B50BDA020300000A9
-:10081000DFFFFE005000000200C0000002000000E8
-:10082000FFFFF7F41FFFC07C000FF800044000003A
-:10083000001000000C4000001C400000E00000A080
-:100840001FFFC5501FFD00081FFFC5641FFFC578AF
-:100850001FFFC58CE1000690E10006EC00000000DF
-:100860000000000000000000000000000100000087
-:100870000000000000000000000000002010004008
-:10088000201000402010004020140080200C0000A8
-:10089000200C0000200C00002010004020140080DC
-:1008A0002014008020140080201800C0201C0100AB
-:1008B000201C0100201C010020200140201800C045
-:1008C000201800C0201800C0201C0100201800C003
-:1008D000201800C0201800C0201C0100202001406A
-:1008E00020200140202001402020094020200940F4
-:1008F000202009402020094020240980FFFFFFFF1D
-:10090000FFFFFFFFFFFFFFFF0000000000000000EF
-:1009100000000000000000000000000020005588DA
-:1009200020005458200055882000558820005394FA
-:100930002000539420005394200051D4200051D41F
-:10094000200051CC2000513820004FE020004DC045
-:1009500020004B94000000000000000020005558CB
-:1009600020005424200054C8200054C82000527C89
-:100970002000527C2000527C2000527C2000527CBF
-:10098000200051C42000527C20004F0020004D70F8
-:1009900020004B40000000000000000020000BF091
-:1009A00020003ADC200004C02000473020000BE883
-:1009B000200041F4200003F0200046F020004B1CF2
-:1009C00020003F0020003E1C20003A58200038E85C
-:1009D00020003658200031B820003C7820002DD06F
-:1009E0002000286420006828200023F0200020D068
-:1009F0002000207C20001D68200018602000158841
-:100A000020000E5420000C3420001134200013204C
-:100A1000200043EC20003EB420000BF8200004C06E
-:100A200000000000000000000000000000000000C6
-:100A300000000000000000000000000000000000B6
-:100A400000000000000000000000000000000000A6
-:100A50000000000000000000000000000000000096
-:100A60000000000000000000000000000000000086
-:100A70000000000000000000000000000000000076
-:100A80000000000000000000000000000000000066
-:100A90000000000000000000000000000000000056
-:100AA0003264000000000000326400006400640052
-:100AB00064006400640064006400640000000000DE
-:100AC0000000000000000000000000000000000026
-:100AD0000000000000000000000000000000000016
-:100AE0000000000000000000000000000000000006
-:100AF00000000000000000000000000000000000F6
-:100B000000000000000010000000000000000000D5
-:100B100000000000000000000000000000001000C5
-:100B200000000000000000000000000000000000C5
-:100B300000432380000000000000000000000000CF
-:100B400000000000000000000000000000000000A5
-:100B50000000000000000000005C94015D94025E53
-:100B600094035F94004300000000000000000000B8
-:100B70000000000000000000000000000000000075
-:100B80000000000000000000000000000000000065
-:100B90000000000000000000005C90015D90025E1B
-:100BA00090035F9000530000000000000000000070
-:100BB0000000000000000000000000000000000035
-:100BC0000000000000000000000000000000000025
-:100BD0000000000000000000009C94001D90019D9A
-:100BE00094029E94039F94040894050994060A9421
-:100BF000070B94004300000000000000000000000C
-:100C000000000000000000000000000000000000E4
-:100C10000000000000000000009C90019D90029EDA
-:100C200090071D90039F90047890057990067A9024
-:100C3000077B90005300000000000000000000004F
-:100C400000000000000000000000000000000000A4
-:100C5000000000000000000000DC94001D9001DD99
-:100C60009402DE9403DF940404940505940606942C
-:100C70000707940808940909940A0A940B0B940036
-:100C80004300000000000000000000000000000021
-:100C9000000000000000000000DC9001DD9002DE9A
-:100CA000900B1D9003DF9004B49005B59006B690AC
-:100CB00007B79008B89009B9900ABA900BBB90009A
-:100CC0005300000063FFFC0020006C6010FFFF0A6F
-:100CD0000000000020006C8400D23110FFFE0A00EA
-:100CE0000000000020006CCC00D33110FFFE0A0091
-:100CF0000000000020006D0C00D43110FFFE0A003F
-:100D00000000000020006D8000D53110FFFE0A00B9
-:100D10000000000063FFFC00E00000A012FFF7826B
-:100D200020028257C82163FFFC12FFF303E830045E
-:100D3000EE3005C03093209421952263FFFC000023
-:100D40001FFFD000000400201FFFC5A01FFFC6909A
-:100D5000200A0011FFFB13FFFB03E631010200161E
-:100D6000FFFA17FFFAD30F776B069060B4667763CC
-:100D7000F85415F3541AA50F140063FFF90000008E
-:100D80006C1004C020D10F006C1004C0C71AEF060D
-:100D9000D830BC2BD72085720D4211837105450BCD
-:100DA000957202330C2376017B3B04233D0893713B
-:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
-:100DC000088202280A01038E380E0E42C8EE29A6B8
-:100DD0007E6D4A0500208800308C8271D10FC0F0F2
-:100DE000028F387FC0EA63FFE400C0F1C050037E89
-:100DF0000CA2EE0E3D1208820203F538050542CB27
-:100E00005729A67E2FDC100F4F366DFA050020887B
-:100E100000308CBC75C03008E208280A0105833810
-:100E2000030342C93E29A67E0D480CD30F6D8A05E7
-:100E300000208800B08C8271D10FC05008F5387541
-:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
-:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
-:100E6000221DC0D07B81352920060BB702299CFAB0
-:100E7000655008282072288CFF2824726491642A07
-:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
-:100E90003FC1CE7CA13669AC336000370029200603
-:100EA000D7D0299CFACC57282072288CFF2824728E
-:100EB0006491392AD0000CA80C6481680EA90C64D6
-:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
-:100ED0000F2D25028A32C0900A6E5065E5B529248F
-:100EE00067090F4765F5B12C200C1FEEB30CCE112E
-:100EF000AFEE29E286B44879830260058219EEAF2D
-:100F000009C90A2992A36890078F2009FF0C65F58B
-:100F10006E2FE28564F56865559628221D7B810554
-:100F2000D9B060000200C0908B9417EEA50B881416
-:100F300087740B0B47A87718EEA309BB100877023C
-:100F400097F018EEA117EEA208A8010B8802074738
-:100F5000021BEE9E97F10B880298F22790232B90AC
-:100F60002204781006BB1007471208BB0228902104
-:100F70000777100C88100788020B880217EE968BF3
-:100F80003307BB0187340B880298F3979997F48B4A
-:100F90009587399BF588968B3898F688979BF897B4
-:100FA000F998F717EE8D28E28507C7082D74CF084A
-:100FB000480B28E68565550F2B221E28221D7B89AC
-:100FC000022B0A0064BF052CB00728B000DA200607
-:100FD000880A28824CC0D10B8000DBA065AFE76394
-:100FE000FEEA0000292072659E946004E72A2072C0
-:100FF00065AEBF6004DE00002EB0032C2067D4E095
-:1010000065C1058A328C330AFF500C4554BC5564C7
-:10101000F4EB19EE72882A09A90109880C64821F71
-:10102000C0926000DD2ED0032A2067D4E065A0D8EE
-:101030008A328B330AFC500B4554BC5564C4BE192C
-:10104000EE67882A09A9017989D50BEA5064A4E3DF
-:101050000CEE11C0F02F16132E16168AE78CE82A14
-:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
-:101070006583468837DBC0AE89991E789B022BCCEE
-:10108000012B161B29120E2B0A0029161A7FC307E3
-:101090007FC9027EAB01C0B165B49D8B352F0A00BC
-:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
-:1010B00012162B1619005104C0C100CC1A2CCCFFFB
-:1010C0002C16170CFC132C16182B121A2A121BDCC8
-:1010D000505819B6C0D0C0902E5CF42C12172812AC
-:1010E000182F121B2A121A08FF010CAA01883407B4
-:1010F0004C0AAB8B2812192BC6162F86082A860994
-:101100002E74102924672E70038975B1EA2A74039E
-:10111000B09909490C659DB42B20672D250265B354
-:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
-:10113000B00728B000DA2006880A28824CC0D10BFC
-:101140008000DBA065AFE763FD8289BAB199659045
-:101150009788341CEE2398BA8F331EEE1C0F4F5421
-:101160002FB42C8D2A8A320EDD020CAC017DC966AB
-:101170000A49516F92608A3375A65B2CB0130AED51
-:10118000510DCD010D0D410C0C417DC9492EB01200
-:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
-:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
-:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
-:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
-:1011D000EB01C0B164B161C091292467C020D10F77
-:1011E00000008ADAB1AA64A0C02C20672D25026510
-:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
-:1012000065D28A0A4E516FE202600281C0902924A1
-:1012100067090F4765F2F828221D7B89022B0A0017
-:1012200064BCA92CB00728B000DA2006880A2882FE
-:101230004CC0D10B8000DBA065AFE763FC8E0000E3
-:101240000CE9506492ED0CEF11C080281611AFBF6D
-:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
-:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
-:1012700089991C789B022CEC012C161B29120C2C32
-:101280000A0029161A7AE3077AE9027FBB01C0C176
-:1012900065C2A58B352C0A002A0A007AE30564E1B1
-:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
-:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
-:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
-:1012D000920263FF018A330AAB5064BEF92CD0132B
-:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
-:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
-:101300008A362FD2097CA3077AC9027EFB01C0B1BD
-:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
-:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
-:10133000027DEB01C0C165CE9DC090292467C0200D
-:10134000D10F88378C3698140CE90C29161408F83C
-:101350000C981D78FB07281214B088281614891DD4
-:101360009F159B16C0F02B121429161A2B161B8BD7
-:10137000147AE30B7AE90688158E1678EB01C0F132
-:1013800065F1BA29121A2F12118A352E121B9A1AD8
-:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
-:1013A000881AC0F098107AE30A7EA9052A12017AF9
-:1013B0008B01C0F164F08160018389368B37991706
-:1013C0000BE80C981F09C90C29161578EB07281291
-:1013D00015B088281615D9C09A199E188A1F2E1282
-:1013E000152A161A2E161BDAC0C0E08C177F930B35
-:1013F0007FA90688188F1978FB01C0E165E13E29B5
-:10140000121A2F12138A352E121B9A1BAFEE2F12AF
-:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
-:1014200098127AE30A7EA9052A12037A8B01C0F189
-:1014300065F10A2E12162E16192A121B005104C02D
-:10144000E100EE1AB0EE2E16170EFF132F16180F2E
-:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
-:10146000AA2A161B2C161A63FC5E00007FB30263C7
-:10147000FE3163FE2B7EB30263FC3063FC2A000066
-:101480006450C0DA20DBC058168AC020D10FC0914A
-:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
-:1014A0000A80C09A2924682C7007581575D2A0D1DB
-:1014B0000F03470B18ED4DDB70A8287873022B7DC6
-:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
-:1014D00063FAE4000029221D2D25027B9901C0B08A
-:1014E000C9B62CB00728B000DA2006880A28824C3A
-:1014F000C0D10B8000DBA065AFE7C020D10FC09149
-:1015000063FBFF00022A0258024C0AA202060000F6
-:10151000022A025802490AA202060000DB70DA2001
-:10152000C0D12E0A80C09E2924682C7007581554FB
-:10153000C020D10FC09463FBC9C09663FBC4C096A2
-:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
-:10155000C2A02AB4002C200C63FF27008D358CB765
-:101560007DCB0263FDD263FC6D8F358ED77FEB029E
-:1015700063FDC563FC6000006C1004C020D10F0047
-:101580006C1004C020D10F006C10042B221E2822E6
-:101590001DC0A0C0942924062A25027B8901DBA056
-:1015A000C9B913ED04DA2028B0002CB00703880A6B
-:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
-:1015C0000F0000006C10042C20062A210268C805B8
-:1015D00028CCF965812E0A094C6591048F30C1B879
-:1015E0000F8F147FB00528212365812716ECF3297E
-:1015F000629E6F98026000F819ECEF2992266890BD
-:10160000078A2009AA0C65A0E72A629D64A0E12B45
-:10161000200C0CB911A6992D92866FD9026000DBBF
-:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
-:101630000C65E0C7279285C0E06470BF1DECEC68C4
-:10164000434E1CECEB8A2B0CAA029A708920089955
-:10165000110D99029971882A98748F329F752821EB
-:1016600004088811987718ECDC0CBF11A6FF2DF246
-:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
-:1016800074DB40580E81D2A0D10FC020D10F0000D2
-:101690000029CCF96490B12C20668931B1CC0C0CB6
-:1016A000472C24666EC60260008509F85065807F6D
-:1016B0001CECD18A2B0F08400B881008AA020CAA38
-:1016C000029A7089200899110D99029971883398AE
-:1016D000738C329C728A2A9A748934997563FF7D5F
-:1016E00000CC57DA20DB30DC4058155FC020D10F2A
-:1016F00000DA20C0B65815EE63FFE500DA20581571
-:10170000EC63FFDC00DA20DB30DC40DD5058167A79
-:10171000D2A0D10FC858DA20DB305814C72A2102D2
-:1017200065AFBDC09409A90229250263FFB200007C
-:101730002B21045814731DECADC0E02E24668F30AD
-:101740002B200C0F8F1463FF66292138C088798302
-:101750001F8C310CFC5064CF562B2104C0C0581490
-:10176000681DECA2C0E08F302B200C0F8F1463FF9C
-:101770003E2C20662B2104B1CC0C0C472C2466583F
-:1017800014601DEC9AC0E02E24668F302B200C0FC5
-:101790008F1463FF1A0000006C1004C0B7C0A116BC
-:1017A000EC9615EC88D720D840B822C04005350209
-:1017B0009671957002A438040442C94B1AEC7B1947
-:1017C000EC7C29A67EC140D30F6D4A0500808800BD
-:1017D000208C220A88A272D10FC05008A53875B09B
-:1017E000E363FFD76C10069313941129200665520A
-:1017F00088C0716898052A9CF965A29816EC6F2933
-:1018000021028A1309094C6590CD8AA00A6A512ADF
-:10181000ACFD65A0C2CC5FDB30DA208C115815120C
-:10182000C0519A13C7BF9BA98E132EE20968E060CE
-:101830002F629E1DEC606FF8026000842DD2266836
-:10184000D0052F22007DF9782C629DC79064C0706E
-:101850009C108A132B200C2AA0200CBD11A6DD0A97
-:101860004F14BFA809880129D286AF88288C09792E
-:101870008B591FEC520FBF0A2FF2A368F0052822E4
-:10188000007F894729D285D4906590756000430018
-:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
-:1018A000BF0A6E96102FF2A368F00488207F890586
-:1018B00029D285659165DA2058157DC95C6001FFE4
-:1018C00000DA20C0B658157A60000C00C09063FFA3
-:1018D000B50000DA205815766551E48D138C11DBC4
-:1018E000D08DD0022A020D6D515813E39A1364A1D2
-:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
-:10190000FD00C091C0F12820062C2066288CF9A784
-:10191000CC0C0C472C24666FC6098D138DD170DE5C
-:1019200002290A00099D02648159C9D38A102B211A
-:10193000045813F38A13C0B02B24662EA2092AA0E0
-:10194000200E28141CEC298D1315EC1DC1700A778C
-:101950003685562DDC28AC2C9C12DED0A8557CD3C5
-:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
-:10197000D4A028200CB455C0D02B0A882F0A800C84
-:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
-:1019900012DEF0AC882D84CF28120229120378F3CE
-:1019A000022EFDF8289020D3E007880CC1700808AB
-:1019B00047289420087736657FAB891313EC10898C
-:1019C00090C0F47797491BEC0EC1CA2821048513F7
-:1019D000099E4006EE11875304881185520E880235
-:1019E0000C88029BA09FA18F2B9DA598A497A795DB
-:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
-:101A00001106CC082BC2852DE4CF2BBC202BC6851C
-:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
-:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
-:101A300088201EEBE38F138EE48FF40888110A8848
-:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
-:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
-:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
-:101A70002F24722C2502C020D10F871387700707EF
-:101A80004763FD6E282138C099798B0263FE9ADD89
-:101A9000F063FE9500DA20DB308C11DD505815968E
-:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
-:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
-:101AC000C020D10F6C1006292102C0D07597102AB2
-:101AD00032047FA70A8B357FBF052D25020DD90261
-:101AE000090C4C65C18216EBB41EEBB228629EC095
-:101AF000FA78F30260018829E2266890078A2009B3
-:101B0000AA0C65A17A2A629DDFA064A1772B200C24
-:101B10000CBC11A6CC29C286C08C79830260015707
-:101B200019EBA709B90A2992A368900788200988A8
-:101B30000C65814327C2851CEBA964713A89310980
-:101B40008B140CBB016FB11D2C20669F10B1CC0C07
-:101B50000C472C24666EC60260014009FF5065F1F7
-:101B60003A8A102AAC188934C0C47F973C18EBA974
-:101B70001BEBA88F359C719B708B209D7408BB025A
-:101B80009B72C08298751BEBA40F08409B730F8853
-:101B90001198777FF70B2F2102284A0008FF022FA8
-:101BA0002502C0B4600004000000C0B07E97048F1E
-:101BB000362F25227D970488372825217C9736C02B
-:101BC000F1C0900AF9382F3C200909426490861927
-:101BD000EB7618EB7728967E00F08800A08C00F05A
-:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
-:101BF0002AAC182A669D89307797388F338A321835
-:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
-:101C10008498E1882B9DE59AE69FE71AEB78099F67
-:101C20004006FF110FCC020A880298E2C1FC0FCCDB
-:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
-:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
-:101C5000CF5CC020D10FC081C0900F8938C0877978
-:101C6000880263FF7263FF6600CC57DA20DB30DC4A
-:101C7000405813FDC020D10FDA2058148D63FFE8BF
-:101C8000C0A063FE82DA20C0B658148963FFD90071
-:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
-:101CA000045813171EEB44C0D02D246663FEB10008
-:101CB0006C1006D62019EB3F1EEB4128610217EB92
-:101CC0003E08084C65805F8A300A6A5169A3572B29
-:101CD000729E6EB83F2A922668A0048C607AC9343E
-:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
-:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
-:101D00002C160068E0052F62007EF91522D285CFDF
-:101D10002560000D00DA60C0B6581465C85A60012D
-:101D20000F00DA60581462655106DC40DB308D30FC
-:101D3000DA600D6D515812D0D3A064A0F384A1C015
-:101D40005104044763FF6D00C0B02C60668931B157
-:101D5000CC0C0C472C64666FC60270960A2B61048B
-:101D60005812E7C0B02B64666550B42A3C10C0E737
-:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
-:101D8000EB0A18EB0B28967E8D106DDA0500A08803
-:101D900000C08CC0A089301DEB1A77975388328C15
-:101DA000108F3302CE0BC02492E12261049DE00427
-:101DB00022118D6B9BE59FE798E61FEB1009984079
-:101DC0000688110822020FDD02C18D9DE208220261
-:101DD00092E4B4C22E600C1FEB000CE811A7882C13
-:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
-:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
-:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
-:101E1000C0F00ADF387FE80263FF6C63FF600000F8
-:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
-:101E300063C020D10F0000006C10042920062A2264
-:101E40001EC0392C221D232468C0307AC107DDA0B2
-:101E5000600004000000C0D06E9738C08F2E0A804A
-:101E60002B2014C0962924060EBB022E21022B24FF
-:101E7000147E8004232502DE307AC10EC8ABDBD08D
-:101E8000DA202C0A00580B062E21020E0F4CC8FE39
-:101E90006000690068956528210208084C65805C2F
-:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
-:101EB0002668B0048C207BC95329A29D1FEAC16407
-:101EC000904A9390C0C31DEAD52B21049D9608BB70
-:101ED000110CBB029B979B911CEAD2C08523E4A204
-:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
-:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
-:101F00000F8EF912EAC82E2689C020D10FDA20C020
-:101F1000B65813E7C020D10F6C10062A2006941083
-:101F200068A80528ACF965825029210209094C6589
-:101F3000920ACC5FDB30DA208C1058134BC051D39F
-:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
-:101F50008F3A16EA99B1FB64B13128629E6F88020C
-:101F60006001ED294C332992266890078A2009AA3E
-:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
-:101F8000B7110677082972867983026001CD0CB9F2
-:101F90000A2992A36890082C220009CC0C65C1BBC9
-:101FA0002772856471B5282006288CF96481E52C98
-:101FB00020668931B1CC0C0C472C24666EC60260B9
-:101FC00001A109F85065819B2A21048CE488361E02
-:101FD000EA7D088914A9CC08084709881019EA92F3
-:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
-:101FF000CC029C722E302C293013283012049910F8
-:102000000688100CEE109F740EAE0209880208EECE
-:10201000029E738C3704AA119C758938C0F4997696
-:102020008839C0C1987718EA828E359C7B9E780EDD
-:102030008E1408EE029E7A8E301CEA7177E73088A3
-:102040003289339C7C9F7D0E9C4006CC118F2B29BE
-:1020500076132D76112876120CAA0218EA68C1C9E7
-:102060000CAA022A761008FF029F7EC0AA60000117
-:10207000C0A6A4BC0CB911A6992892852DC4CF087E
-:10208000A80B289685655100C020D10F2B200C0C81
-:10209000B7110677082A72860CB90A6FA902600187
-:1020A000182992A36890082A220009AA0C65A109A0
-:1020B0002A728564A1032C203D0C2C4064C08C8CBA
-:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
-:1020D0001464C0777CF374283013C0FC78F86CC0AB
-:1020E00090292467090C4765C0D719EA4718EA45C3
-:1020F0008F208C3508FF110C8C1408FF0288E49F98
-:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
-:102110008809880298A218EA3DA4BC2F72852DC4B4
-:10212000CF2FFC102F76852F210229207208FF0265
-:10213000B2992924722F2502C020D10F00CC57DA82
-:1021400020DB308C105812C8C020D10FC09163FF23
-:102150008FDA20C0B658135663FFE100DA20581317
-:102160005463FFD82B21045811E61EEA152B200CCE
-:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
-:10218000DD505813DDD2A0D10F2A2C748B10580BC0
-:10219000BED2A0D10F292138C08879832E8C310C72
-:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
-:1021B000EA048F3A2B200C63FE0DDA2058133C639F
-:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
-:1021D0002104B1CC0C0C472C24665811C91EE9F817
-:1021E0002B200CC0D02D24668F3A63FDDA0000004E
-:1021F0006C10089514C061C1B0D9402A203DC04080
-:102200000BAA010A64382A200629160568A8052C9D
-:10221000ACF965C33F1DE9EA6440052F120464F27E
-:10222000A02621021EE9E606064C6562E615E9E2F3
-:102230006440D98A352930039A130A990C6490CCEA
-:102240002C200C8B139C100CCC11A5CC9C112CC2F7
-:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
-:10226000A368E0098620D30F0E660C6562C2881150
-:102270002882856482BA891364905EDA80D9308CB2
-:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
-:102290007FB718B88A293C10853608C6110E660229
-:1022A0009681058514A5D50F550295800418146DE7
-:1022B0008927889608CB110888140EBB02A8D82954
-:1022C0009C200F88029BA198A088929BA308881449
-:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
-:1022E000131EE9BD86118D10286285AEDD08FF0B37
-:1022F0002CD4CF2821022F66858B352A207209889D
-:1023000002ABAA2825022A2472C020D10F29529E8E
-:1023100018E9A96F980260020B28822668800829B4
-:10232000220008990C6591FC2A529DC1CE9A126434
-:10233000A1F22B200C2620060CB8110588082D824E
-:10234000860EBE0A7DC3026002052EE2A368E00885
-:102350002F22000EFF0C65F1F6288285D780DE80E3
-:102360006482009816266CF96462012C206688311C
-:102370002CCC010C0C472C24666EC6026001BC08F4
-:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
-:10239000048B2D2830102F211D0C88100BFB090AEF
-:1023A00088020988020CBB026441529B709D71989F
-:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
-:1023C0007FD714273C10BCE92632168C3996E69C40
-:1023D000E78A37B4382AE6080B131464304A2A8295
-:1023E0001686799A9696978C778A7D9C982B821779
-:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
-:10240000086DB9218BC996A52692162AAC18B899E1
-:102410009BA196A08BC786CD9BA22B921596A49BC1
-:10242000A386CB2CCC2026A605C0346BD4200D3B34
-:102430000C0DD8090E880A7FB705C0909988BC8812
-:10244000C0900B1A126DAA069988998B288C18C017
-:10245000D01BE97A1CE97916E96EB1FF2A211C2309
-:10246000E6130F0F4F26E6122F251D7FA906C0F099
-:10247000C08028251D05F6111AE9678F202BE61567
-:102480002CE6162DE61726E6180AFA022AE6142983
-:102490002006299CF96490F829200C8D14C0801A1C
-:1024A000E94E0C9C11AA99A5CCDA202BC285289460
-:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
-:1024C000D10F8A356FA546D8308BD56DA90C8A8679
-:1024D0000A8A14CBA77AB335288C10C080282467C9
-:1024E000080B4765B10BDA20DB302C12055811DEE2
-:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
-:102500006461059B709D719872C04D63FEA4C0818B
-:1025100063FFC9008814CC87DA20DB308C15581192
-:10252000D2C020D10FDA20C0B658126163FFE40098
-:1025300000DA208B1058125E63FFD8009E178A12B3
-:102540002B21045810EF8E17C09029246663FE34A7
-:10255000C08063FE06DA20DB308C15DD505812E6B1
-:10256000D2A0D10FDA2058125263FFA7002B2138D6
-:10257000C0A87BAB026001048C310CFC5064CE041B
-:102580008A122B2104C0C098175810DD8E1763FDE6
-:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
-:1025A00028206A7F87050826416460A3C09016E949
-:1025B000141CE9232A200723E61BB1AA0CFD0226DE
-:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
-:1025D0001C8B260A0A472BE6208B282AE53E2BE691
-:1025E000212924072820062A2064688346B44463EE
-:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
-:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
-:1026100032162B76129D712D761328761489960A20
-:102620002A14AA990C9902997069ED71C14663FD4B
-:102630008100000064AFB51DE8E22C20168DD20A9F
-:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
-:102650002B21046EB81E2C2066B8CC0C0C472C2401
-:1026600066C9C09E178A125810A68E17C0348F20D4
-:10267000C0D02D2466C06826240663FF2E8A122B44
-:1026800021042C20669817B1CC0C0C472C246658DA
-:10269000109C8E178716C0D02D246663FCE68D35FE
-:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
-:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
-:1026C0001027D63006460127D6320A440117E8DF24
-:1026D00024D631A74727D63324F21596B794B68D62
-:1026E000C3BCBB9DB58D35299C107D83C22F211D98
-:1026F000C14663FD330000006C1006292006289CAB
-:10270000F86582BF2921022B200C09094C6590E154
-:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
-:102720000260028C19E8A609B90A2992A3689007E9
-:102730008C2009CC0C65C27829A2856492722D6226
-:102740009E1AE89C6FD80260026E2AA22629160102
-:1027500068A0082B22000ABB0C65B25C29629DC1EF
-:102760008C6492542A21200A806099102C203CC746
-:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
-:10278000260DBD112DDC1C0D0D410EDD038E27B174
-:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
-:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
-:1027B000A16000093E01073EB1780987390B770A0D
-:1027C00077EB0260020A2C2123282121B1CC0C0CCA
-:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
-:1027E000DB30581095292102CC96C0E80E9E022EAF
-:1027F0002502CC57DA20DB30DC4058111BC020D139
-:102800000F2C20668931B1CC0C0C472C24666EC687
-:10281000026001D309FD5065D1CD2F0A012E301180
-:1028200029221464E01128221B090C4400C1040071
-:10283000FA1A0A880228261B2E3010C0A0C0B094B5
-:102840001295131CE85F88302CC022088D147787FE
-:1028500004C0F10CFA38C041C0F225203CC0840805
-:1028600058010F5F010F4B3805354007BB10C0F012
-:10287000084F3808FF100FBB0228ECFEC0F0084FCD
-:1028800038842B0BA8100AFF102A21200F88020B76
-:10289000880208440218E86E8F1108440228212596
-:1028A0000A2A140828140488110A88022A21049488
-:1028B000F08B2004E41008BB1104BB02C04A04BB27
-:1028C000029BF1842A08AB110BEB0294F40A541119
-:1028D0000B44020555100D1B4094F707BB100B5518
-:1028E00002085502C08195F68433C05094F3B19428
-:1028F0008B3295F898F99BF2C080C1BC24261499BC
-:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
-:10291000FE883998FF853525F6108436851324F610
-:10292000118B3784122BF612C0B064C07E893077C9
-:1029300097438D3288332E30108F111CE83109995E
-:10294000400699112CF614C0C42CF6158C2B2DF6CC
-:102950001A28F61B2BF61904A81109880208EE02A2
-:1029600019E827C18008EE0209C90229F6162EF6D9
-:1029700018C09E600001C09A2F200C18E8170CFEAA
-:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
-:1029900085C87F8A268929A7AA9A260A990C090937
-:1029A00048292525655050C020D10F00C09A63FFEB
-:1029B000C6DA2058113F63FE38DA20C0B658113C01
-:1029C00063FE2E0068973C2B9CFD64BE24C020D182
-:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
-:1029E000DC3865CDE063FE098A102B2104580FC442
-:1029F000C0B02B246663FE21DB402A2C745809A248
-:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
-:102A100020D10F006C1004290A801EE80E1FE80E5A
-:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
-:102A3000029ED19CD0C051C07013E80A14E8091856
-:102A4000E8072AB285A82804240A234691A986B853
-:102A5000AA2AB685A98827849F25649FD10F0000E4
-:102A60006C100AD630283010292006288CF9648290
-:102A70009B68980B2A9CF965A1B2022A02580FABF9
-:102A800089371BE7CFC89164520E2A21020A0C4CE9
-:102A900065C2588D3019E7C874D7052E212365E229
-:102AA0009E2F929E1AE7C46FF8026002532AA22654
-:102AB00068A0082C22000ACC0C65C2442A929D64AE
-:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
-:102AD00018E7BC64B0052880217B8B432B200C18A1
-:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
-:102AF0002EE2A368E0052F22007EF9372CC2859CC8
-:102B00001864C2332B212F87660B7B360B790C6F31
-:102B10009D266ED2462C203D7BC740CE5560001EC0
-:102B20002A200CC1B28C205811229A1864A2458D1B
-:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
-:102B4000E06000022E60030EDB0C6EB20EDC700C37
-:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
-:102B6000C1C82D21205810BC8C268B279A160CBB6F
-:102B70000C7AB3348F18896399F3886298F28E6562
-:102B80009EF82D60108A189D1768D729C0D09DA97E
-:102B90002C22182B22139CAB9BAA97A58E667E73C2
-:102BA00002600097CF5860001FDA208B1658108201
-:102BB00065A13863FFBDC081C0908F18C0A29AF98B
-:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
-:102BD0001026C051D6A0C0C02BA0102CA4039B1758
-:102BE0002C1208022A02066B02DF702D60038E177A
-:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
-:102C0000188C148B16ACAC2C64038A268929ABAAC9
-:102C10000A990C9A26886609094829252507880CEF
-:102C200098662F2218A7FF2F261863FE96DA20DB5E
-:102C300030DC40DD50581130D2A0D10FC0302C20F4
-:102C4000668961B1CC0C0C472C24666EC60260000C
-:102C5000D2C03009FD5065D0CA8E6764E0696470E7
-:102C600066DB608C18DF70DA202D60038E170CDDB8
-:102C7000119E10AD6D2DDC201EE7755800F923263E
-:102C800018DA208B16DC402F2213DD50B1FF2F26DF
-:102C900013580FC5D2A0D10F0028203D0848406529
-:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
-:102CB00014CACF7CD32D2AAC10C090292467090DEB
-:102CC0004764DDC5600092002C1208066B022D6C73
-:102CD00020077F028E17DA209E101EE75C58007DC9
-:102CE00063FF9A00C09163FFD1000000655081DA54
-:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
-:102D00000FDA20C0B658106A63FFE000006F95022A
-:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
-:102D2000D2A0D10F8A152B2104580EF52324662832
-:102D30006010981763FF2100DA2058105D63FFAB25
-:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
-:102D50009409A90229250263FF91DB30DC40DD5094
-:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
-:102D70000FC020D10FDA202B200C58107263FF6B8C
-:102D80006C1004282006C062288CF8658125C0508C
-:102D9000C7DF2B221BC0E12A206B29212300A104BD
-:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
-:102DB0002A246B04E4390DCC030CBB012B261B64C5
-:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
-:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
-:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
-:102DF000E71E64B09B8C2B2421040DCC029CB08870
-:102E000020C0C50888110C880298B1882A0844118E
-:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
-:102E20000E9E0825E4CF2DDC282DA6852921020938
-:102E3000094C68941A689820C9402A210265A00BA1
-:102E40002A221E2B221D7AB10265A079C020D10F43
-:102E50002C212365CFDE6000082E21212D21237E29
-:102E6000DBD52B221E2F221D2525027BF901C0B0A8
-:102E700064BFC413E6D02CB00728B000DA20038862
-:102E80000A28824CC0D10B8000DBA065AFE763FF4E
-:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
-:102EA000A08B2008BB1106BB029BA1893499A263A9
-:102EB000FF790000262468DA20DB30DC40DD505842
-:102EC000108ED2A0D10FDA202B200C580FF9C02081
-:102ED000D10F00006C1006073D14C080DC30DB40D1
-:102EE000DA20C047C02123BC3003283808084277C5
-:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
-:102F0000D30F6DDA0500508800308CC0E0C020255A
-:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
-:102F2000220F8940941077F704C081048238C0F1E1
-:102F30000B2810C044C02204540104FD3802520181
-:102F400002FE3808DD10821C07EE100E6E020EDD48
-:102F500002242CFEC0E004FE380AEE100E88020D9A
-:102F600088028DAB1EE69B08D8020E880298B0C07E
-:102F7000E80428100E5E0184A025A125084411084C
-:102F80004402052514045511043402C0810E8E3903
-:102F900094B18FAA84109FB475660C26A11FC0F24D
-:102FA000062614600009000026A120C0F20626149F
-:102FB0000565020F770107873905E61007781008C5
-:102FC000660206550295B625A1040AE611085811B5
-:102FD00008280208660296B7C060644056649053A1
-:102FE000067E11C0F489C288C30B340B96459847FE
-:102FF000994618E6829F410459110E99021FE680F6
-:10300000020E4708D80298420E99029F40C1E00E76
-:10301000990299442FA00CB4380CF91114E66F1ED4
-:10302000E666A4FFAE992E928526F4CF0E880B2873
-:103030009685D10F2BA00C1FE6601CE6670CBE1115
-:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
-:10305000D10FC08005283878480263FEA263FE962F
-:103060006C1006C0C06570F18830C03008871477D6
-:103070008712C0B0C0A619E652299022C030CC9762
-:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
-:103090008225203C0B3F109712831CC070085801FA
-:1030A0000D5D01089738C0800B98380777100488A9
-:1030B00010086802087702C0800D98382D3CFE0881
-:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
-:1030D0000CB8100FDD02053B400EDD029D4089203B
-:1030E000043D100899110D99022D210409A9020827
-:1030F000DD119941872A05B9100D3D020ABB110D5A
-:10310000BB02087702974428212587120828140457
-:103110008811071E4007EE100E99027566092621D8
-:103120001F062614600006002621200626140868C3
-:10313000029B47098802984629200CD2C0C0800C07
-:103140009E111BE6251FE61CAB99AFEE2DE28528EC
-:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
-:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
-:10317000981008770C9FD898D989538F5299119934
-:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
-:1031900088108D1108E70C9751AD8DD7F078DB01C1
-:1031A000B1F79D5397528830C03008871408884083
-:1031B000648ED565BEC963FEBC0000006C1004D7E8
-:1031C00020B03A8820C0308221CAA0742B1E2972F8
-:1031D000046D080FC980C9918575B133A2527A3B3D
-:1031E0000B742B0863FFE900649FECD10FD240D130
-:1031F0000F0000006C100AD6302E3027D950DA406C
-:1032000015E5F02430269A1529160464E00264932B
-:10321000732920062A9CF865A3CE2A2102270A04D6
-:103220000A0B4C65B3978C3074C7052D212365D4E8
-:10323000A0C0A62B0A032C2200580F3664A3B9178E
-:10324000E5DE8E389A1664E3BA2F6027285021C92C
-:10325000F37E8311C2B08C202A200C580F55D7A0C2
-:10326000CDA16004A200C2B08C202A200C580F29E6
-:10327000D7A064A4862F212E8B680FBF360FB90C00
-:103280006F9D54296027D5B06E920528203D7B8F15
-:103290004CDA20DB50C1C42D211F580EEF8B269A2B
-:1032A000189A1989272AAC380B990C7A9353896399
-:1032B000C08099738F6298789F728E659E798D67B2
-:1032C0009D7B8C6695759C7A8E687E53026000B1FA
-:1032D0008B1465B050600038DBF063FFA5008A14E2
-:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
-:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
-:10330000FFE2DA208B18580EAC65A2B163FF9E0075
-:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
-:103320002D16042CA403DC70DA20DB60DF502D6046
-:1033300003C0E09E109D171EE5B90CDD110D6D0850
-:103340002DDC285BFF478E668F678817AF5FA8A8C4
-:1033500028640375FB01B1EE8A189E669F67892673
-:103360008829AA9909880C99268E6808084805EECC
-:103370000C28252515E5939E6865EECC63FEE600D6
-:103380000000C9432F21232B21212FFC010F0F4FB8
-:103390002F25237FBB026003142C20668961B1CCEA
-:1033A0000C0C472C24666EC60260022809FD50658D
-:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
-:1033C00020DB601EE5AB2D6003C08098100CDD1182
-:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
-:1033E0008C202A200C580ECB0AA70265A00FC0B073
-:1033F0002C22002A200C580EC7D7A064AFEFDA2089
-:10340000C1BCC1C82D21208F188E268929AFEE9E00
-:10341000260E990C090948292525580E8FC090C001
-:1034200050C0C288609A191EE566C0A12EE022082D
-:103430008F14778704C0810E8938C0800B93102DBC
-:10344000203C2921200CDC0104DB010929140BA8F4
-:10345000380CA5380D3D401CE57E8B2B08881007E5
-:1034600055100855020533022821250F154003BBCE
-:10347000020CBB0207551005D3100828140ADD11F1
-:103480000488110988020533022921040833029BAC
-:1034900070C0808A201BE57708AA110BAA029A71D6
-:1034A000C0A1852A9376957408931103DD020ADD85
-:1034B000029D778C63C1DC9C738B6298789A799BB0
-:1034C00072232214C0C0B1352526149C7B9D7593B0
-:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
-:1034E00025621B957F2362172376102D62182D7697
-:1034F000112C62192C761264E0B98E6077E73DC01A
-:10350000FE13E53E1DE53FC1818A628B6304951180
-:103510000E9C4006CC110C5502247615085502C0AD
-:10352000802D76148D2B2B761B2A761A287619255A
-:10353000761803DD022D76166000030000C0FA2E17
-:10354000200C19E52518E51CA9E90CEE11A8EEC020
-:10355000802DE2852894CF0DFD0B2DE685DA208B9A
-:10356000198C158D14580D90D2A0D10FDC70DF503E
-:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
-:103580005563FE53002B203D0B4B4065BC826FE51D
-:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
-:1035A000F3162AAC10C090292467090F4764FC6009
-:1035B00060015F00C0FA63FF85C09163FFE8881473
-:1035C000658168DA20DB608C15580DA7C020C0909B
-:1035D00029A403D10F8A162B2104580CC9C0A02A94
-:1035E00024668E6863FDCA00002B9CF965B0FDDA85
-:1035F00020580CCE63FC220000DA20C0B6580E2CF6
-:1036000063FFBA002B200C0CBE11A7EE2DE286C181
-:10361000C27DC30260011819E4E909B90A2992A31D
-:103620006890082A220009AA0C65A10326E2856495
-:1036300060FD2C20668931B1CC0C0C472C24666FC0
-:10364000C60270960C8A162B2104580CADC0D02DE2
-:1036500024668E3077E74D1CE4E91BE4E98F32885D
-:1036600033C0A42D21040E994006991104DD1109DF
-:10367000DD029A61C19009DD029B60C0908B2B9D99
-:10368000649F66986799650CBB029B6228200C1AA0
-:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
-:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
-:1036B0008B142C2523C8B7022A02066B02580CDE95
-:1036C0002A210265AEF7C0D80DAD022D250263FE9A
-:1036D000EC008E14C8E8DA20DB30580CD72A21021F
-:1036E00065AEDA07AF022F250263FED100DA20DBD8
-:1036F000308C158D14580E80D2A0D10FDA202B20DB
-:103700000C580DEB63FEB600DA202B200C580E0D82
-:1037100063FEAADA20DB308C152D12042E0A8028D5
-:103720000A00282468580CD663FAE500C020D10F9F
-:10373000DA20580DDF8914CD92DA20DB308C155851
-:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
-:103750000F2A2C748B1558064CD2A0D10F000000F4
-:103760006C100E28210224160108084C6583A91F3D
-:10377000E49229F29E6F98026003AD1EE48E29E266
-:10378000266890082A220009AA0C65A39B24F29DB2
-:103790006443952A31160A4B412B240BB4BB0B0B07
-:1037A000472B240C0CB611AF66286286C1CC78C3B7
-:1037B0000260037F19E48209B90A2992A36890077D
-:1037C0008C2009CC0C65C36B276285647365293135
-:1037D00009C0D02D24668C3599139C2A88369C14F8
-:1037E000982B8E3798159E169E2C8C38C0E10C5C59
-:1037F000149C179C2D88392925042E251D28251C4D
-:103800002C3028C0822C243C2930290C0C4708C8B5
-:103810000129243D29311598189912090841089960
-:103820000C299CEC29251F7EC725921C8212282A70
-:1038300000082060991B01023E00093EB128098260
-:1038400039891B0E221102990C821C29251F821C0A
-:10385000941D951E24211F15E4880451609A10C1FF
-:10386000802B1610252014961F05054301063E00E7
-:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
-:10388000441C893D2E26132E26142E26152E246B1D
-:1038900025241406D61CC05025261825261B2524B1
-:1038A000672524682832112525232525242525254B
-:1038B00025252C2925222D25202B252124252E26A2
-:1038C000252F14E46F16E46D1BE45298192D211C6A
-:1038D000C08498719B70892095759577957F967CAB
-:1038E000967E98799B7894731BE46714E4680C388F
-:1038F000400288100C064015E464016610947D9B1C
-:1039000074841D1BE444086602957B18E431851E0F
-:103910000B99029972997A0866022B121096768694
-:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
-:1039300000A10400E81A7D8B0263FFEE891AC0E043
-:10394000961F1DE43E2B1610951E941D28203D2920
-:10395000761A297612C040C051C0B22D76130806DF
-:10396000408D170B8801065E380AEE101BE44A08EA
-:103970005438B0A609661188140B44102B761B042A
-:10398000EE028B1614E44308DA1406EE020D8810DA
-:103990002A761E86131AE41C04EE020D66110866D0
-:1039A000022E76160D14141EE41A0D44110BD814B1
-:1039B0000866020A44022E76182E76102476172600
-:1039C000761FC084287619287611C76F0C24400F03
-:1039D00044111CE3FB26761D26761C2676152676DA
-:1039E000148A262676242676252976222E762028E5
-:1039F00076218E1888150DB91016E4278BC70D880F
-:103A0000110E5E39ADBB851904EE022676230988B6
-:103A100002861F89102876260A04480544110505E8
-:103A2000480E551105440204EE02851E841D2E76B3
-:103A3000272820069B2D29246A2E31172B12102EA1
-:103A40002538CC83C0D02D2407C0D7090840648016
-:103A50008E9A290928416480AA64E0B42D2406C006
-:103A60009809E9362D0AA02A628501C404ADAA2D61
-:103A700021042A668508DD11883F8E3E2732100812
-:103A8000EA1800C40408E8180088110ECE5308771D
-:103A900002C08308DD029D4118E401090D4E9840E3
-:103AA00088209A4397449D4517E3FE1DE3CB058884
-:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
-:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
-:103AD00097CA28A4A268711C655060C020D10F004D
-:103AE0002D2406C080C09809E9360E893863FF731B
-:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
-:103B0000D600000065EF54C098C0D82D240663FF8E
-:103B1000522D2406C09063FF4ACC57DA20DB308C4C
-:103B200011580C51C020D10F00DA20C0B6580CE05B
-:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
-:103B400011580551D2A0D10F6C10062820068A33D7
-:103B50006F8202600161C05013E39729210216E3CE
-:103B600096699204252502D9502C20159A2814E331
-:103B7000948F2627200B0AFE0C0477092B712064F2
-:103B8000E1398E428D436FBC0260016F00E104B0E9
-:103B9000C800881A08A80808D80298272B200668A9
-:103BA000B32ECE972B221E2C221D0111027BC901A0
-:103BB000C0B064B0172CB00728B000DA2003880A20
-:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
-:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
-:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
-:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
-:103C0000007E894F29C2851EE37E6490461FE38CA7
-:103C10009E90C084989128200A95930F88029892CC
-:103C20008E200FEE029E942F200788262F950A984B
-:103C3000969A972E200625240768E3432921022A15
-:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
-:103C500063FF4E002E2065CBEDC082282465C9F697
-:103C600005E4310002002A62821BE36D2941020B48
-:103C7000AA022A668209E43129210263FF23000097
-:103C800064DFB88F422E201600F1040DEE0C00EE1A
-:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
-:103CA000B0293221283223B4992936217989A92BC8
-:103CB00032222B362163FFA0C020D10F9F2725245D
-:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
-:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
-:103CE000D0868E290EAE0C66E089C0F128205A28B5
-:103CF0008CFE08CF3865FEE863FF580000E00493AF
-:103D000010C0810AF30C038339C78F08D80308A8B1
-:103D10000108F80C080819A83303C80CA8B82875BE
-:103D200020030B472B24158310CBB700E104B0BC54
-:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
-:103D4000990209094F29250263FE50002D206A0DB2
-:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
-:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
-:103D7000FF79000028221F658E2763FF6E25240629
-:103D800029210263FE1B00006C10066571332B4C69
-:103D900018C0C7293C18C0A1C08009A8380808422B
-:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
-:103DB0000F6DAA0500B08800908C8940C0A00988CA
-:103DC000471FE32B080B47094C50090D5304DD1026
-:103DD000B4CC04CC100D5D029D310CBB029B30882D
-:103DE000438E2098350FEE029E328D26D850A6DDE8
-:103DF0009D268E40C0900E5E5064E0971CE3111E1D
-:103E0000E300038B0BC0F49FB19EB02D200A99B341
-:103E10000CDD029DB28F200CFF029FB48E262D2058
-:103E2000079EB68C282DB50A9CB72924072F20069B
-:103E30002B206469F339CBB61DE2E22320168DD224
-:103E40000B330C00D10400331AB48DA3C393292281
-:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
-:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
-:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
-:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
-:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
-:103EA000AFEEACBB22B28529E4CF02820B22B685ED
-:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
-:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
-:103ED00018DD50580A9B8940C08063FEE3066E02DD
-:103EE000022A02DB30DC40DD505800049A10DB501F
-:103EF000DA70580465881063FEF700006C100692B3
-:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
-:103F10000BD9A07DA30229ADF875C302600084C04F
-:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
-:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
-:103F4000D8F000808800708C879068B124B2227706
-:103F5000D3278891C0D0CB879890279C1000708879
-:103F600000F08C9D91CB6FC08108BB0375CB36638D
-:103F7000FFB4B1222EEC1863FFD485920D770C8626
-:103F8000939790A6D67D6B01B1559693959260005C
-:103F900016B3CC2D9C188810D9D078D3C729DDF85A
-:103FA00063FFC100C0238A421BE2C000CD322D4412
-:103FB000029B3092318942854379A1051EE2BC0EF5
-:103FC000550187121BE2AB897095350B9902993226
-:103FD00088420A880C98428676A6A696768F44AFC9
-:103FE000AF9F44D10F0000006C10089311D63088A9
-:103FF00030C0910863510808470598389812282165
-:1040000002293CFD08084C6581656591628A630A56
-:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
-:104020005ACCC42D0A022D245A7FE0026002158961
-:104030002888261FE29F09880C65820F2E200B0F0F
-:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
-:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
-:1040600080084837B88DB488981089601AE2557B6B
-:1040700096218B622AA0219C147BA3179D132A20D2
-:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
-:104090006001C4002E200C1BE2480CEA110BAA0898
-:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
-:1040B000F0052822007F892C2BA28564B0AA876294
-:1040C0008826DE700C7936097A0C6FAD1C8F279B21
-:1040D0001508FF0C77F3197E7B729D139C149B15BA
-:1040E000CF56600025C0B063FFD0D79063FFDD00DE
-:1040F000009D139C14DA20DB70580B2F8B158C1449
-:104100008D1365A06A8E6263FFCC00DA208B11DC10
-:1041100040580AD5D6A08B15C051DE70DA20DC607D
-:10412000DD405BFF768D138C14D9A02E200C1BE292
-:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
-:104140002EF4CF0B990B29A68563FF1D00DA20DC26
-:1041500060DD40DE708912282007DF50A9882824FE
-:10416000075BFF09D2A0D10F00DBE0DA20580B502B
-:104170006550EF2A20140A3A4065A0EBDB60DC4072
-:10418000DD30022A025809BCD6A064A0D584A183E0
-:10419000A00404470305479512036351C05163FE11
-:1041A0005C2C2006D30F28CCFD6480A568C704C012
-:1041B000932924062C2006C0B18D641FE2019D279F
-:1041C0009D289D298FF29D2600F10400BB1A00F066
-:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
-:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
-:1041F000BB36C0E20B0B470EBB372B241618E1F978
-:104200000A09450D0B422B240B29240AB4BE2E2487
-:104210000C7D88572920162FCCFDB09D0A5C520DCD
-:10422000CC362C246465FDEC0C0C4764CDE618E11B
-:10423000E48E2888820C9F0C00810400FF1AAFEEE8
-:104240009E2963FDCF1CE21163FE13001CE20B6389
-:10425000FE0C8D6563FFA500DA202B200C580B396E
-:10426000645F0FC020D10F00C020D10FC09329245C
-:1042700016C09363FFA000006C1004C06017E1CD6E
-:104280001DE1D0C3812931012A300829240A78A1EF
-:1042900008C3B27BA172D260D10FC0C16550512654
-:1042A00025022AD0202F200B290AFB2B20142E2098
-:1042B0001526241509BB010DFF0928F1202B241414
-:1042C000A8EE2EF52064A0A92B221E28221D011184
-:1042D000027B8901DB6064B0172CB00728B000DADC
-:1042E0002007880A28824CC0D10B8000DBA065AF74
-:1042F000E7DB30DC40DD50DA205800DE29210209FE
-:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
-:10431000372ED02064E02D022A02033B02DC40DD70
-:10432000505800D4D2A0D10F2B2014B0BB2B241492
-:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
-:1043400002D2A0D10FC020D10F2E200669E2C126D3
-:1043500024062B221E2F221D29200B2820150D9903
-:10436000092A9120262415AA882895207BF14960E6
-:104370000048B0BB2B24140B0A4164A0627CB70236
-:104380002C25022B221E2C221DD30F7BC901C0B06D
-:10439000C9B62CB00728B000DA2007880A28824C5A
-:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
-:1043B000262406D2A0D10F0000DB601DE18164BF7E
-:1043C0004F2CB00728B000DA2007880A28824CC09A
-:1043D000D10B8000DBA065AFE71DE17963FF310001
-:1043E00026240663FF9C00006C1004282006260A81
-:1043F000046F856364502A2920147D9724022A02C1
-:10440000DB30DC40DD50580019292102090A4CC874
-:10441000A2C020D10FC0B10B9B022B2502C020D11E
-:104420000F00022A02033B022C0A015800D1C9AA3C
-:10443000DA20DB30DC40580A0C29A011D3A07E978B
-:10444000082C0AFD0C9C012CA411C0512D2014062F
-:10445000DD022D241463FFA4DA20DB30DC40DD50C4
-:10446000C0E0580987D2A0D10F0000006C100616DA
-:10447000E1521CE152655157C0E117E14E2821027B
-:104480002D220008084C6580932B32000B695129BE
-:104490009CFD6590872A629E6EA84C2A722668A0B1
-:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
-:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
-:1044C0002FF2A368F0052822007F89072DD285D31B
-:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
-:1044E00020580883600035002D21041BE1757DBB39
-:1044F00024DA20C0B658087ECA546001030B2B5042
-:104500002B240BB4BB0B0B472B240C63FFA0DA202E
-:10451000580A67600006DA20C0B6580A656550E0A0
-:10452000DC40DB302D3200022A020D6D515808D2DA
-:104530001CE123D3A064A0C8C05184A18EA00404B0
-:10454000470E0E4763FF3500002B2104C08B8931D5
-:10455000C070DF7009F950098F386EB8172C2066CB
-:10456000AECC0C0C472C24667CFB099D105808E44B
-:104570008D1027246694D11EE126B8DC9ED06550AC
-:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
-:10459000F119E10518E10728967EB04BD30F6DBAEB
-:1045A0000500A08800C08C2C200CC0201DE10B0C45
-:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
-:1045C000F685D10FC0800AB83878D0CD63FFC1001E
-:1045D0008E300E0E4763FEA12A2C742B0A01044D67
-:1045E000025808D72F200C12E0FC0CF911A699A252
-:1045F000FF27F4CF289285D2A008480B289685D1B2
-:104600000FC020D10F0000006C1004C060CB55DB40
-:1046100030DC40055D02022A025BFF942921020979
-:10462000084CC882D2A0D10F2B2014B0BB2B24146D
-:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
-:10464000D2A0D10F0000022A02033B02066C02C076
-:10465000D0C7F72E201428310126250228240A0F5E
-:10466000EE012E241458010E63FFA300262406D267
-:10467000A0D10F006C1006282102D62008084C6536
-:10468000809D2B200C12E0CC0CB811A2882A8286C7
-:10469000B5497A930260009719E0C909B90A2992CD
-:1046A000A36890082A620009AA0C65A08228828566
-:1046B0001CE0D46480799C80B887B14B9B819B10AF
-:1046C000655074C0A7D970280A01C0D0078D380D75
-:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
-:1046E0000F6D4A0500808800908C2E3008C0A00015
-:1046F000EE322E740028600C19E0B80C8D11A2DD8A
-:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
-:10471000D685D10FC0F0038F387FA0C063FFB400EF
-:10472000CC582A6C74DB30DC4058080BC020D10F09
-:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
-:104740007058087F2E30088B1000EE322E7400282F
-:10475000600C19E0A10C8D11A2DDA988C0202CD21B
-:10476000852284CFD2A00CBC0B2CD685D10F0000A3
-:104770006C1004292014282006B19929241468817A
-:1047800024C0AF2C0A012B21022C24067BA004C0DC
-:10479000D02D2502022A02033B02044C02C0D0584D
-:1047A00000C0D2A0D10FC020D10F00006C1004298E
-:1047B0003101C2B429240A2A3011C28378A16C7B4A
-:1047C000A1696450472C2006C0686FC562CA572D86
-:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
-:1047E000292102090E4CC8E2C020D10FC0F10F9F51
-:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
-:10480000DC28201406880228241463FFC7292015F9
-:104810001BE06C2A200BC0C09C240BAA092BA120F2
-:104820002C2415AB9929A52063FF9900C020D10F36
-:10483000DA20DB30DC40DD50C0E0580891D2A0D156
-:104840000F0000006C1004CB5513E06725221F0DEC
-:10485000461106550CA32326221E25261F06440BAF
-:1048600024261E734B1DC852D240D10F280A80C087
-:104870004024261FA82828261E28261DD240D10FF6
-:10488000C020D10F244DF824261E63FFD80000005D
-:104890006C1004D620282006C0706E85026000D4FB
-:1048A0001DE04E19E04612E0442A8CFC64A1302B36
-:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
-:1048C000B8110288082E828609B90A7EC3026000E8
-:1048D0009A2992A368900509AA0C65A08E28828562
-:1048E000648088B8891BE04A94819B80655155C0DB
-:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
-:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
-:104910000500808800908CC0A029600C0C9C11A21E
-:10492000CC2BC285AD990B4B0B2BC6852860062777
-:1049300094CF6881222D6015D2A0C9D2C0E22E6426
-:1049400006D10F00C0F008AF387FB0BD63FFB100E3
-:10495000276406D2A0D10F00D2A0D10F00CC57DA25
-:1049600060DB30DC405808C0C020D10FDA60580945
-:104970005063FFE80028221E29221DD30F789901D9
-:10498000C080C1D6C1C11BE018C122AB6B6480429C
-:1049900078913F2A80000CAE0C64E0BB02AF0C643F
-:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
-:1049B000E864E0A32FACE764F09D2EACE664E097DA
-:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
-:1049D00065AFBC28612308D739D97060007B00001F
-:1049E0002B600C0CB811A2882C82862A0A087CAB9A
-:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
-:104A00002A828564A0691FDFFE276504C0E3C0C455
-:104A10002E64069CA11CE02B9FA02E600A97A30C7D
-:104A2000EE029EA28F600CFF029FA42E60147AEF0C
-:104A30004627A417ADBC2F828527C4CF2FFC202F7B
-:104A4000868563FE692A6C74C0B1DC90DD4058072E
-:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
-:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
-:104A7000DA6058090F63FEE4290A0129A4170DBF63
-:104A8000082E828527F4CF2EEC202E868564500BCD
-:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
-:104AA0006C10062B221E28221D93107B8901C0B09A
-:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
-:104AC0000747010E4E01AD2D9E11C0402E0A146401
-:104AD000B06E6D084428221D7B81652AB0007EA13E
-:104AE0003B7FA1477B51207CA14968A91768AA1484
-:104AF00073A111C09F79A10CC18B78A107C1AE2908
-:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
-:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
-:104B200089116987BB649FB863FFDC00647FB4634D
-:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
-:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
-:104B5000107CB1217AB901C0B0C9B913DF96DA204F
-:104B600028B0002CB00703880A28824CC0D10B80E3
-:104B700000DBA065AFE7D240D10F8910659FD463F9
-:104B8000FFF300006C1008C0D0C8598C30292102F6
-:104B90000C0C4760000C8E300E1E5065E19E2921E2
-:104BA00002C0C116DF85090B4C65B0908A300A6ED1
-:104BB0005168E3026000852F629E1BDF7E6EF85312
-:104BC0002BB22668B0052E22007BE94727629DB7ED
-:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
-:104BE000F2869E12798B4117DF7507B70A2772A3E9
-:104BF000687004882077893029F285DF90D7906526
-:104C000090652A210419DFAE7A9B22DA205806B873
-:104C1000600029002C21041BDFAA7CBB18DA20C00D
-:104C2000B65806B3C95860014CC09063FFCCDA2077
-:104C300058089F600006DA20C0B658089D655135B7
-:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
-:104C5000D3A064A120292102C05184A18CA0040406
-:104C6000470C0C4763FF3E00C09B8831DBD008F83F
-:104C700050089B3828210498116E8823282066ACA0
-:104C80008C0C0C472C24667CBB159F139E148A1039
-:104C90008B1158071B8E148F13C0D02D24668A30B9
-:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
-:104CB0000827FC106550A4B83ADF70C051C08007C7
-:104CC000583808084264806718DF3819DF392986A8
-:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
-:104CE000A08930B4E37F9628C0F207E90B2C940822
-:104CF0009B909F912F200C12DF380CF811A6882969
-:104D00008285A2FF2DF4CFD2A009330B238685D153
-:104D10000F22200C891218DF300C2B11A6BBA82201
-:104D20002D24CF2CB285D2A00C990B29B685D10F9A
-:104D3000C087C0900A593879809663FF8ADB30DAE1
-:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
-:104D500065AE4D2D2502C09063FE45009E142A2CA1
-:104D600074C0B1DC70DD405806F68E14C0D01BDF75
-:104D700028C1C863FF6AC020D10F00006C1006284C
-:104D8000210217DF0D08084C65824929729E6F9831
-:104D90000260025019DF082A922668A0078B200AB9
-:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
-:104DB000C0602B3008C0F164B0712E0AFFB0B86437
-:104DC00081512DBCFE64D0F364505C2A2C74044BDA
-:104DD000025800AD0AA2020600000000001ADF0817
-:104DE0002C20076EBB0260022218DEFE13DF081BB8
-:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
-:104E000099D223B08026B480B13308330293D318EB
-:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
-:104E2000EC0B2CD685655FA2C020D10F2B21048806
-:104E300031DE6008F85008CE386EB8102C2066B10C
-:104E4000CC0C0C472C24667CEB026001AF2E30109A
-:104E50002930112C301300993200CB3264E1452AFD
-:104E600030141EDF1A00AA3278CF050E9C092BC41D
-:104E70007F1CDF1766A0050E98092A8480B4A71846
-:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
-:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
-:104EA0003303AC882A848B2CD03627848C03CC0126
-:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
-:104EC000FB11A7BB2DB286C0987D9302600121190A
-:104ED000DEBB09F90A2992A36890082D220009DD9A
-:104EE0000C65D10C2DB285DE6064D10488312B2194
-:104EF0000408F85008CE386FB80263FEDF2C206635
-:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
-:104F100060013100293108292504283014B0886443
-:104F200080A62B31092B240AC0812B30162FD423C5
-:104F30002B240BB4BC2C240C8D378B36292504DE96
-:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
-:104F50009C1101C4048F380DBE1800C4040DB8188C
-:104F600000881108FF02C08308CC0218DECC9CA187
-:104F700098A018DECB8C209EA39FA405CC110BCF4C
-:104F800053C1E09EA50CFF0208FF029FA218DE8914
-:104F90002624662C729D2684A22CCC182C769D6328
-:104FA000FE250000002D30121CDECD00DA3278DF45
-:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
-:104FC0002A301100AA3263FEEC2E240A2B31099BF1
-:104FD0002B63FF5300CC57DA20DB30DC405807222C
-:104FE000C020D10F00DA20C0B65807B163FFE5003A
-:104FF00000DBF0DA205807AE63FFD9000058064006
-:105000001DDE70C0F126246663FE41008B20280A55
-:10501000FFB1CE23200A2C21040E0E472E24077840
-:1050200031359AD02CD50A96D319DEA62ED416C0C7
-:105030008398D1C0E309B80298D409390299D226DD
-:10504000240763FDC958062E8D102624662B2104E3
-:105050002F200C63FD86000008B81119DE6808EEE9
-:1050600002882B9ED59AD0C0EF09880298D204C935
-:10507000110E990299D4C0E49ED163FFC1000000D3
-:105080006C1004C020D10F006C100485210D381164
-:1050900014DE478622A42408660C962205330B935F
-:1050A00021743B13C862D230D10FC030BC29992182
-:1050B00099209322D230D10F233DF8932163FFE34F
-:1050C0006C100AD620941817DE3CD930B8389819DD
-:1050D0009914655256C0E1D2E02E61021DDE390EF0
-:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
-:1050F000F1558EE129D0230E8F5077E66B8F181E65
-:10510000DE78B0FF0FF4110F1F146590CE18DE7516
-:105110008C60A8CCC0B119DE2728600B09CC0B0D20
-:10512000880929812028811E2A0A0009880C08BACA
-:10513000381BDE6B0CA90A2992947B9B0260008CC1
-:105140002B600C94160CBD11A7DD29D286B84879C6
-:1051500083026000D219DE1909B80A2882A39817C1
-:105160006880026000A36000A51ADE5F84180AEE62
-:1051700001CA981BDE108C192BB0008CC06EB313C3
-:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
-:10519000AE6000380C0C5360000900000018DE51AE
-:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
-:1051B000880929812028811E2A0A0009880C08BA3A
-:1051C000380CA90A2992947E930263FF72DA60C0B8
-:1051D000BA58073764507360026A00001ADDF68C13
-:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
-:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
-:1052000063FFC9000C0C5363FF0989607899182962
-:10521000D285C9922B729E1DDDE76EB8232DD22652
-:10522000991369D00B60000DDA60580721600017F0
-:105230000088607D890A9A1A29729D9C129915CF5F
-:1052400095DA60C0B658071A6551F98D148C18DBD1
-:10525000D08DD0066A020D6D51580587D3A09A14DF
-:1052600064A1E182A085A1B8AF9F1905054702029C
-:10527000479518C05163FE602B6104C08B8931C013
-:10528000A009F950098A386EB81F2C6066A2CC0CB0
-:105290000C472C64667CAB119F119E1B8A15580528
-:1052A000988E1B8F11C0A02A64669F1164F0E58957
-:1052B0001388190FFD022E0A006DD9172F810300E4
-:1052C000908DAEFE0080889F9200908C008088B800
-:1052D0009900908C65514E8A10851A8B301FDDC85D
-:1052E000881229600708580A2C82942D61040ECC7C
-:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
-:105300005D50A29909094729C48065D0DA2E600C46
-:10531000C0D01FDDB10CE811AFEEA7882282852D29
-:10532000E4CF02420B228685D2A0D10F8E300E0E22
-:105330004763FDA2A29C0C0C472C64077AB6CD8B68
-:10534000602E600A280AFF08E80C64810E18DDDD73
-:1053500083168213B33902330B2C34162D350AC051
-:105360002392319F30C020923308B20208E80292A3
-:10537000349832C0802864072B600CD2A01CDD96C4
-:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
-:105390002DE685D10F8B1888138D30B88C0D8F4773
-:1053A0000D4950B4990499100D0D5F04DD1009FFEB
-:1053B000029F800DBB029B8165508D851AB83AC053
-:1053C000F1C0800CF83808084264806B1BDD771947
-:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
-:1053E000C08CC0A063FEF30082138B161DDD8828DD
-:1053F000600AC0E02EC4800D880202B20B99239F80
-:1054000020C0D298229D2122600CB2BB0C2D11A786
-:10541000DD28D28508BB0B18DD702BD685A8222E7F
-:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
-:10543000FF168E1B63FEA300C087C0900AF938795F
-:10544000809263FF86C020D10F9E1B2A6C74C0B16E
-:105450008D1858053B8E1B851A63FE7E886B821360
-:10546000891608BE110ECE0202920B9E25B4991E1B
-:10547000DD639F200E88029822C0EF04D8110E88A9
-:10548000029824C0E49E21C080D2A02B600C286426
-:10549000071CDD510CBE11A7EE2DE285ACBB28B474
-:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
-:1054B00020D10F006C10048633C071C03060000131
-:1054C000B13300310400741A0462017460F1D10F29
-:1054D0006C1004022A02033B025BFFF61CDD391B41
-:1054E000DD83C79F88B009A903098A019AB0798032
-:1054F0001EC0F00FE4311DDD300002002BD2821EF1
-:10550000DD7C2AC1020EBB022BD6820AE431D10F08
-:1055100028C102C19009880208084F28C50208E482
-:1055200031D10F006C1004C0C00CE43112DD251A1B
-:10553000DD2200020029A28218DD701BDD6E26210B
-:10554000020B990108660129A68226250206E4318C
-:1055500014DD6B15DD66236A9023261685502426FC
-:1055600015252617222C50D10F0000006C1008D6EC
-:10557000102B0A64291AB41ADD0F0D23111CDD103B
-:105580000F2511B81898130E551118DD5DAC55A8EC
-:1055900038AA332C80FF2A80FEA933288D01298068
-:1055A0000108AA112880000CAA02088811098802A3
-:1055B00008AA1C288C0828160458086814DD010A5B
-:1055C000A70224411A2A30802B120407AA2858085F
-:1055D00063B1338B13B4559A6004AC28B4662C566F
-:1055E0002B7B69E016DD3A9412C050C0D017DCF472
-:1055F0009D15D370D4102F60802E60829F169E1749
-:10560000881672891A8D128C402A607F0DCC282B47
-:105610003A200CAA28580851C0B10ABE372E354886
-:105620008F1772F91A8D128C402A60810DCC282BAD
-:105630003A200CAA28580849C0B10ABE372E354A6C
-:10564000B233B444B1556952B6B466C0508F15B880
-:1056500077D370B2FF9F156EF899D10F6C1004C00C
-:1056600021D10F006C1004270A001CDCD31FDCE4DE
-:105670001EDCE71DDCD01ADD141BDD22C02824B09F
-:10568000006D2A75AA48288080C09164806100411D
-:105690000415DCCBC03125503600361A06550105FD
-:1056A00095390C56110C66082962966E974D0D5966
-:1056B0000A29922468900812DD0602420872993B7A
-:1056C00023629512DCC8CB349F300282020E440262
-:1056D000C092993194329233AD52246295C0902495
-:1056E0004C1024669524B0002924A0AA42292480C5
-:1056F000B177B14404044224B400D10FD10FD10FCB
-:105700006C10041ADCAC2AA00058021C5BFFD50206
-:105710002A02033B025BFFD11BDCAAC9A12CB10208
-:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
-:10573000C0A00AE43118DCA00002002F828219DC2C
-:10574000B32EB10209FF022F86820EE431D10F0081
-:105750006C1004C02002E43114DC9A16DC970002BD
-:1057600000226282234102732F0603E431C020D15C
-:105770000F19DCE61ADCE52841020A2A0109880132
-:105780002A668228450208E43115DCDC12DCE125BA
-:105790004621D10F6C1004292006289CF96480A0B2
-:1057A0002A9CFD65A0968A288D262F0A087AD9049E
-:1057B0002B221FC8BD2C206464C0812E22090EAE8E
-:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
-:1057D000C28619DC7A78F3026000AD09B90A299211
-:1057E000A36890082E220009EE0C65E09B29C28573
-:1057F0001FDC846490929F90C0E41FDC919E9128EE
-:10580000200AC0E09E930F8802989288200F880299
-:1058100098942F20079A979D962F950A2E24072853
-:10582000200629206468833328C28512DC6B288C0B
-:1058300020A2B22E24CF28C685C020D10FC020D1EF
-:105840000F2A206A0111020A2A4165AF52DA20C0EC
-:10585000B05805EA64AFE5C021D10F00649FC81FAE
-:10586000DC582D20168FF209DD0C00F10400DD1A42
-:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
-:105880008C2028C685C020D10FC021D10F00000078
-:105890006C1004260A001BDC9F15DC4928206517C4
-:1058A000DC46288CFE6480940C4D110DBD082CD272
-:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
-:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
-:1058D000F52AD6F406E4310002002872822AFAFF83
-:1058E000004104290A012F510200991A0A9903095B
-:1058F00088012876820FE4312624652BD2F48E5C51
-:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
-:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
-:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
-:10593000020B0B4F2B55020BE431D10F00DB30DA99
-:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
-:10595000FFA8000006E4310002002F728218DC303C
-:105960002E510208FF022F76820EE431D10F000083
-:105970006C1004C03003E43116DC1015DC11000299
-:105980000024628274472118DC64875C084801287F
-:105990006682CD7319DC620C2A11AA99229283299E
-:1059A00092847291038220CC292B51020BE431C0E6
-:1059B00020D10F001FDC5B2E51020FEE012E55028D
-:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
-:1059D000561DD10F6C10061BDBF71EDBF922B00041
-:1059E0001ADC526F23721DDC39C04818DC511FDCF1
-:1059F0004FDC10D5C083F000808600508A6D4A4F7E
-:105A00000F35110D34092440800B560A296294B1D8
-:105A1000330E55092251480F44110C440A8740099E
-:105A2000A80C02883622514907883608770CA899B5
-:105A30002966949740296295874109A80C02883607
-:105A400007883608770CA899296695974103034281
-:105A5000B13808084298F0D10F1CDC3613DC372728
-:105A6000B0002332B5647057C091C0D016DC351534
-:105A7000DC33C0402AC00003884328C4006D793C51
-:105A8000004104B14400971A7780148E502FB295CC
-:105A90002DB695AFEE2EED2006EE369E5060001826
-:105AA00077A00983509D5023B69560000223B295DC
-:105AB000223D2006223622B695B455B8BBD10F0040
-:105AC00003884328C400D10F6C1004C04004E431A3
-:105AD00015DC1D000200885013DC1CCB815BFFBD70
-:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
-:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
-:105B0000990C0929146000050BA90C092914993076
-:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
-:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
-:105B3000D10F000004E4311EDBA00002002DE28240
-:105B40002FBAFF2C51020FDD012DE6820CE431D17A
-:105B50000F0000006C1004D10F0000006C1004C096
-:105B600020D10F006C100413DBFAC0D103230923EA
-:105B7000318FC0A06F340260008D19DB8F1BDB906A
-:105B800017DBF30C2811A8772672832572822CFA72
-:105B9000FF76514788502E7285255C0425768275E4
-:105BA000E9052572842576827659292E72842E760F
-:105BB000822E76830AE431000200239282002104BF
-:105BC0002FB10200D61A0C66030633012396820F0A
-:105BD000E43126728325728260000200D8A07659D3
-:105BE000220AE43100020023928200210400D21A2A
-:105BF0002FB1020C22030232012296820FE431D22D
-:105C000080D10F00D280D10FC020D10F6C1004DBE7
-:105C100030862015DB68280A00282502DA2028B003
-:105C2000002CB00705880A28824C2D0A010B800041
-:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
-:105C4000769101D10F2BA6A3D10F00006C1004C0D8
-:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
-:105C60007786758574C0A076516288508E77B4555A
-:105C7000957475E903857695747659278F769F75A7
-:105C80009F740AE431000200239282B42E2FB102E5
-:105C900000E10400D61A0C66030633012396820F36
-:105CA000E431867583747639280AE4310002002EC7
-:105CB0009282B42200210424B10200DF1A0CFF03F7
-:105CC0000FEE012E968204E431D280D10FD8A07657
-:105CD00051D6D280D10F00006C1004290A801EDB3F
-:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
-:105CF000B2850FCC029ED19CD0C051C07013DB592D
-:105D000014DB5818DB562AB285A82804240A234637
-:105D100091A986B8AA2AB685A98827849F25649F59
-:105D2000D10F00006C100419DB8B0C2A11A9A98972
-:105D300090C484798B761BDB79ABAC2AC2832CC2EE
-:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
-:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
-:105D6000C94AA929299D0129901F68913270A6036B
-:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
-:105D8000FFB3D230D10F000013DB7503A3018C31B8
-:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
-:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
-:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
-:105DC00028300022300708481209880A28824CDC53
-:105DD000200B80001BDAF90C4A11ABAA29A2840916
-:105DE000290B29A684D10F006C1004C04118DAF2E7
-:105DF00017DAF40C2611A727277038A866256286C3
-:105E0000007104A35500441A75414822628415DBD1
-:105E10001502320BC922882117DAF10884140744CD
-:105E200001754905C834C020D10FD10F0809471D9D
-:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
-:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
-:105E50006684D10FC020D10F6C1004DB30C0D01885
-:105E6000DAD5DA2025300022300708580A28824C7B
-:105E7000DC200B80008931709E121BDACF0C4A1196
-:105E8000ABAA29A28409290B29A684D10F09C952DA
-:105E900068532600910418DACAC0A12F811600AAFF
-:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
-:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
-:105EC0009A0A0A472EF11600A10400881A08EE0269
-:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
-:105EE0000B2BC684D10F00006C1004DB30C0D0191E
-:105EF000DAB1DA2028300022300709880A28824CDB
-:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
-:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
-:105F200016DAA80C2711A626266038A87225228624
-:105F3000006104A35500441A7541082222840232EC
-:105F40000BD10F00C020D10F6C100415DB050249E6
-:105F5000142956112452120208430F8811C07300ED
-:105F6000810400361A008104C78F00771A0877036E
-:105F7000074401064402245612D10F006C10066E2D
-:105F800023026000AC6420A7C0A0851013DADD16E0
-:105F9000DAF4C040A6AA2BA2AE0B19416490666841
-:105FA000915D68925268933C2AA2AA283C7F288C73
-:105FB0007F0A0A4D2980012880002AACF208881146
-:105FC0000988027589462B3D0129B0002BB00108D4
-:105FD00099110B99027A9934B8332A2A00B1447284
-:105FE00049B160004A7FBF0715DADF63FFB90000DF
-:105FF000253AE863FFB10000253AE863FFA90000F5
-:10600000250A6463FFA1C05A63FF9C0000705F080B
-:106010002534FF058C142C34FE70AF0B0A8D142E22
-:106020003D012AE4012DE400DA405BFD5063FFA747
-:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
-:106040001BDACBC080C07160000D00000022A438B4
-:10605000B1AA299C107B915F26928679C2156E6247
-:1060600062C0206D080AB12200210400741A764B28
-:10607000DB63FFEE2292850D6311032514645FCF6D
-:10608000D650032D436DD9039820B4220644146DD5
-:106090004922982098219822982398249825982678
-:1060A000982798289829982A982B982C982D982EDC
-:1060B000982F222C4063FF971EDA4027E68027E6C0
-:1060C00081D10F00C02063FF830000006C1004C06A
-:1060D00062C04112DA3B1ADA3713DA522AA00023DF
-:1060E000322D19DA9F2BACFE2992AE6EA30260000E
-:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
-:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
-:10611000015805922B211A0ABB28D3A09B50580581
-:10612000A92B52000ABB082A0A005805A815DA91C3
-:106130002D21022C3AE80C3C2804DD022D25029C7E
-:10614000505805A08B50AABBC0A15805A01CDA8AE4
-:106150002D21020C3C2806DD0213DA882D25029C35
-:10616000305805988B30AABBC0A25805982A210246
-:10617000C0B40BAA020A0A4F2A25025805ACD10F57
-:10618000242423C3CC2C251A63FF760018DA801C44
-:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
-:1061A0001FDA7C2D203624F47A24F47E24F4820E27
-:1061B000DD0124F4862E0AF707552806DD02C07596
-:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
-:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
-:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
-:1061F0002E0A800D0D4627546C24546B0EDD022DA3
-:10620000243663FEFC0000006C10042A0A302B0ABE
-:10621000035BFF4D12DA53C390292616C3A1C0B306
-:10622000C08A2826175BFF48C03CC3B12B26161A2C
-:10623000D9E42AA02023261764A079C3A2C0B15BA9
-:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
-:10625000C0B12326175BFF3CC28F282616C0FE2F35
-:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
-:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
-:106280002926175BFF31C3C62C2616C1B32A0AA2E2
-:106290002B2617C0B35BFF2C290AA2292616C1851D
-:1062A000282617C2FB2F2616C0E72E26171DDA391F
-:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
-:1062C0006C10041CDA031BD9ED18DA3317DA341614
-:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
-:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
-:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
-:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
-:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
-:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
-:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
-:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
-:106350008A2E369F2C369E2C369DB1ACC07919D929
-:10636000D81BDA1413DA121ADA1218DA1314D9D97C
-:1063700016DA1304F42812DA1204660C040506A2D5
-:1063800052A858AA5AA3539B3029A50027848AC033
-:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
-:1063A0005726361D26361E2E361F16DA0813DA0833
-:1063B000A65504330C2826C82E75002D54AC2E5437
-:1063C000AB2E54AA2326E62326E52E26E7D10F007E
-:1063D0006C100613D99417D9E224723D2232937FB0
-:1063E0002F0B6D08052832937F8F0263FFF3C0C423
-:1063F000C0B01AD973C051D94004593929A4206EAC
-:1064000044020BB502C3281ED96EDDB025E4220577
-:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
-:1064200016D9E11DD9ED94102A724517D9AB6DA983
-:106430004BD450B3557A5B17DF50756B071FD9608B
-:106440008FF00F5F0C12D9A302F228AE2222D68160
-:10645000D54013D9A0746B0715D95A855005450C42
-:10646000035328B145A73FA832A93322369D2236CF
-:106470009E2436802B369F2BF48B2CF48C14D969F8
-:1064800024424DC030041414C84C6D0806B13304C6
-:106490001414C84263FFF20015D947C44000310408
-:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
-:1064B00018D95D2B824D29824E29A5202882537A36
-:1064C000871E2C54008E106FE45D12D93D2F2121C0
-:1064D0002321202F251F04330C23252023251ED103
-:1064E0000FC06218D99F88807E87D98910265400F2
-:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
-:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
-:106510002AB1200A1A1403AA0C2AB5202AB5212A66
-:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
-:10653000202BC51F03DD0C2DC5202DC51ED10F003E
-:106540006C100619D91F14D98612D93615D9A3C7CC
-:106550003FC0E02E56A82E56A92E56AA2E56AB2383
-:10656000262918D946DB101CD99DC0D42A42452DB6
-:1065700016012C160000B0890A880C98905BFF94D5
-:106580002C22E318D90F0C5C149C842B22E48C84FD
-:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
-:1065A0002A86062922CD0959142986072F22892FE8
-:1065B00086095BFF435BFF1423463BC1B01ED90035
-:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
-:1065D000E5025804965BFEBD5BFE96C050C0B01647
-:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
-:1065F0002DC0306000440000007F9F0FB155091985
-:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
-:106610005008580A28822C2B0A000B8000005104D5
-:10662000D2A0C091C7AF00991A0A99039912CE3827
-:1066300064206BD3202B20072516032C12022A621C
-:10664000827CA86318D8DC01110208580A28822C21
-:10665000DA500B8000D2A0643FD58A310A8A140434
-:10666000AA01C82A2B22010B8B1404BB017BA9456C
-:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
-:1066800019D8CD1AD91488130ADA28DC801DD951FB
-:1066900009880A28823C0DAA080B8000652F93D335
-:1066A00020C0B063FF9400007FAF34B155005004A8
-:1066B0000A091963FF42DAB07B7B081AD8C12AA203
-:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
-:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
-:1066E000645F60C050C0B0C7CE9C1263FF5500000D
-:1066F0006C100427221EC08008E4311BD8AF0002B2
-:10670000002AB28219D8AF003104C06100661A298C
-:1067100091020A6A022AB68209E43115D90C0C38B2
-:1067200011A8532832822432842A8CFC7841102903
-:1067300021022A368297A0096902292502D10F0079
-:106740002B21022C32850B6B022CCCFC2C36829731
-:10675000C02B2502D10F00006C1004C0E71DD89299
-:106760001CD8940D4911D7208B228A200B4B0BD2B9
-:10677000A007A80C9B72288CF4C8346F8E026000AE
-:10678000A31FD88AA298AF7B78B334C93DC081C01B
-:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
-:1067A0000500308800508C887008980878B16DD248
-:1067B000A09870D10FC0F0038F387FE0DE63FFD860
-:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
-:1067D0005002F5380505426450792CD67E0B3612EE
-:1067E0002F6C100F4F366DFA0500808800208C0644
-:1067F000440CC081C05003B208237C0C03853805CB
-:10680000054264505A2CD67ED30F6D4A050020886D
-:1068100000308CD2A0A798BC889870D10FD2A0BCB1
-:10682000799970D10FD2302BAD08C0F1C0500BF563
-:1068300038050542CB542CD67E083F14260A100F8B
-:10684000660C0646366D6A0500208800B08C8270A2
-:1068500063FF2D00C05003F53875E08063FF7A00B8
-:10686000C06002863876E09F63FF9900C05003F550
-:106870003875E0C463FFBE006C1004D62068520F68
-:10688000695324DA20DB30DC405800F7D2A0D10F66
-:10689000DA20DB30DC405800F49A2424240EC02196
-:1068A00022640FC020D10F00B83BB04C2A2C748951
-:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
-:1068C0002D240E2890072D9003A488B088B1DD2DCB
-:1068D00094032894075BFFA069511DC0E082242A1D
-:1068E000600F18D8BF2A240329600E8F202924079F
-:1068F00008FF029F209E64D10FC020D10F0000002E
-:106900006C1004942319D8B7C0B3083A110BAA022B
-:10691000992019D8299A2116D827C05028929D2548
-:1069200064A2288C1828969DD10F00006C100428B2
-:106930002066C038232406B788282466D10F0000BB
-:106940006C10060D3C111AD819D820035B0C862256
-:106950000D55118221AA8902320B928105630C9395
-:10696000820C550C792B54CB531CD8111DD80FC059
-:10697000F7A256C031C0A0043A380A0A42769343BF
-:10698000044302C9AB2CD67ED30F6DBA0500208814
-:1069900000308C8281A25272917D92818382C83EA6
-:1069A000D10FC071C06002763876F0DB63FFD5008E
-:1069B000C020BC89998199809282D10F222DF892B2
-:1069C0008163FFA219D7FA02860CA9669611D940F5
-:1069D000063612961006BB0C64A0442CD67E8A1094
-:1069E000D30F6DAA0500208800908CBC828311C053
-:1069F000E0A433240A01034E380E0E42CAEC2CD612
-:106A00007E6DBA0500208800308C821102520CA2E3
-:106A100082BC22928163FF83BC82928163FF7C00EF
-:106A2000C06002363876F0B563FFAF00C070024731
-:106A30003877F0CC63FFC6006C100414D7EBC1525A
-:106A4000A424CA3128221D73811C292102659016B5
-:106A50002A300075A912022A02033B022C3007C01B
-:106A6000D25801D5653FDCD10F2B300703BB0B0B90
-:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
-:106A80006C1004292006C0706E9741292102C08F26
-:106A90002A2014C0B62B240606AA022A24147980C0
-:106AA000022725022A221E2C221D7AC10EC8ABDA2B
-:106AB00020DB302C0A00033D025BF7F96450892D7E
-:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
-:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
-:106AE000D7B529A29EC08A798B712BE22668B004A3
-:106AF0008C207BC96629A29D1FD7B264905D9790B8
-:106B0000C0C31DD7C62B21049D9608BB110CBB0228
-:106B10009B919B971CD7C3C08527E4A22BA29D28DD
-:106B200024068DFA282102B0DD2BBC302BA69D9DBA
-:106B3000FA0C8802282502C8D2C020D10F8EF91283
-:106B4000D7B92E2689C020D10F283000688938DABD
-:106B500020DB30DC4058004463FF6300022A022B34
-:106B60000A065800D3220A00D10F655010293000C0
-:106B7000689924022A02033B02DC4058003BC020F3
-:106B8000D10FD270D10F00002A2C74033B02044CA9
-:106B9000025BFEF163FF2700DB30DC402A2C745BD4
-:106BA000FEEEC020D10F00006C1004C83F8926887B
-:106BB00029A399992609880C080848282525CC522C
-:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
-:106BD0006C1004D820D73082220D451105220C926A
-:106BE0008264207407420B13D771D420A3837323CC
-:106BF00002242DF8858074514CBC82C0906D08161B
-:106C000000408800708C773903D720C0918680744B
-:106C10003901D42074610263FFE2CA98C097C04171
-:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
-:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
-:106C400000208800308C9780D270D10FBC8FC0E0BC
-:106C50000F4E387E90E263FFD6BC8292819280C054
-:106C6000209282D10F0000006C1006C0D71CD74EB6
-:106C70001BD7500D4911D7202E221F28221D0E4E42
-:106C80000BD280078A0C2E761F2AAC80C8346FAED8
-:106C9000026000CB2F0A801AD754A29EAA7A7EA344
-:106CA0003FC93FC0E1C05002E538050542CA552B37
-:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
-:106CC000721DAE9E0EA50C645086D2802E761DC01D
-:106CD00091298403D10FC05003E53875D0D363FFE9
-:106CE000CD15D741027E0CA5EE643051C0A1250A16
-:106CF0000002A538033A020505426450922BC67E75
-:106D00000E35129510255C10054536D30F6D5A05CA
-:106D100000A08800208CC0A1A3E2C05023FA800309
-:106D2000730C03A538AF730505426450722BC67E01
-:106D3000851005450C6D5A0500208800308CD280E6
-:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
-:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
-:106D6000D2302E8D08C0F1C0500EF538050542CB4B
-:106D7000592BC67E0A3F14C1600F660C064636D3F7
-:106D80000F6D6A0500208800E08C22721D63FF03EE
-:106D9000C061C05003653875D80263FF6263FF5C51
-:106DA000C05002A53875D08763FF8100C06003F62C
-:106DB0003876D0BF63FFB9006C10042A2015292053
-:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
-:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
-:106DE0000A472A2415CAAF8B438942B0A8009104F0
-:106DF00000881AA8FF0FBB029B278F260FB80C78BC
-:106E00003B1AC020D10F0000292102C0A20A99021A
-:106E1000292502C021D10F008B2763FFDC2BD12055
-:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
-:106E30008B438C288F42B0AD00F10400DD1AADCC3D
-:106E40000CBB029B27DA20B7EB580019C021D10FE9
-:106E50009F2763FFEF0000006C100428203C643083
-:106E60004705306000073E01053EB156076539050C
-:106E70004928C77FA933030641076603B1660606A2
-:106E800041A6337E871E222125291AFC732B150269
-:106E9000380C09816000063E01023EB124064239E9
-:106EA00003220AD10FD230D10FC05163FFC00000BE
-:106EB0006C100427221EC08008E4311DD6BF0002DA
-:106EC000002CD2821BD6BF003104C06100661A2B91
-:106ED000B1020C6C022CD6820BE43119D7440C3A67
-:106EE00011AA932832829780253282243284B455A5
-:106EF00025368275410A292102096902292502D114
-:106F00000F2A21022B32830A6A022B36822A25029B
-:106F1000D10F00006C100418D6A80C2711087708B0
-:106F2000267286253C04765B1315D6A405220A2218
-:106F300022A3682002742904227285D10FC020D1B7
-:106F40000F0000006C100419D6A727221EC080096C
-:106F5000770208E4311DD6980002002CD2821BD69D
-:106F600098003104C06100661A2BB1020C6C022C2F
-:106F7000D6820BE43119D71D0C3A11AA932832821C
-:106F80009780253282243284B45525368275410B90
-:106F90002A21020A6A022A2502D10F002B21022C83
-:106FA00032830B6B022C36822B2502D10F0000009E
-:106FB0006C10041BD6810C2A11ABAA29A286B43806
-:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
-:106FD000290868B00274B90D299D0129901F6E928D
-:106FE0000822A285D10FC020D10FC892C020D10F96
-:106FF000DA205BEE88C020D10F0000006C10041472
-:10700000D66E28429E19D66B6F88026000BA29920C
-:10701000266890078A2009AA0C65A0AC2A429DC068
-:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
-:10703000C28609B90A7ED30260009A2992A3689099
-:10704000078D2009DD0C65D08C25C2856450862D06
-:107050002104C0306ED80D2C2066B8CC0C0C472C07
-:10706000246665C07B1CD6E218D66B1AD66219D688
-:10707000731DD667C0E49E519D508F209357935542
-:1070800099539A569A5408FF021AD6839F5288261B
-:107090009F5A9E599D58935E9C5D935C9A5B08082D
-:1070A00048058811985FC0D81FD64C0CB911A49917
-:1070B000289285AFBF23F4CF288C402896858E2652
-:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
-:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
-:1070E0000FDBD05BFE072324662B200C63FF7500AB
-:1070F000C72FD10FC72FD10F6C1004C85B292006F2
-:1071000068941C689607C020D10FC020D10FDA20E8
-:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
-:107120002E200C18D6250CEF11A8FF29F286C08856
-:10713000798B791AD6220AEA0A2AA2A368A0048BBC
-:10714000207AB96823F2856430621BD62C290A8024
-:107150002C20682820672D21040B881104DD1108DC
-:10716000DD020DCC02C0842D4A100DCC021DD624A8
-:1071700098319D308A2B99379C340BAA02C0C09C51
-:10718000359C369A322A2C74DB4028F285C0D328ED
-:107190008C2028F6852C25042D24061FD60FDD40D3
-:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
-:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
-:1071C00024160128ACF86583862B2122C0F22A21DF
-:1071D00024CC572AAC010A0A4F2A25247ABB026024
-:1071E000037F2C21020C0C4C65C3192E22158D3205
-:1071F000C0910EDD0C65D39088381ED5EF64836B8B
-:107200008C37C0B8C0960CB9399914B49A9A120D3B
-:10721000991199138F6718D5EAC9FB2880217F83BC
-:10722000168B142C22002A200C5BFF61D4A064A3CF
-:10723000B38F6760002800002B200C89120CBA1154
-:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
-:10725000A368D00488207D893024A28564436427F4
-:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
-:1072700070C1C42D211F5BFEF889268827DDA00977
-:10728000880C7A8B179A10600006C04063FFCC0010
-:1072900000DA208B105BFEC88D1065A267C0E09EEF
-:1072A000488C649C498B658A669B4A9A4B97458FAC
-:1072B000677F7302600120CD529D10DA20DB302CF5
-:1072C00012015BFE698D10C051D6A08FA7C0C08A85
-:1072D00068974D9A4C8869896A984E994F8E6A8A48
-:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
-:1072F0000B8E1477B701C0A1C091C08493159D1760
-:107300009516C0D025203CC030085801089338C0DD
-:1073100082083310085B010535400B9D3807DD10EE
-:107320000BAB100E19402A211F07991003DD020D27
-:10733000BB020553100933020A55112921250A2AD7
-:10734000140929140499110A99020933028A2B2974
-:1073500021040BAA021BD6270899110955020855CA
-:10736000020BAA029A408920881408991109880200
-:1073700019D5A61DD62109880298418B2A934695D6
-:107380004783150DBB0285168D179B448A65896658
-:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
-:1073A00088268E29AD87972607EE0C0E0E482E25CF
-:1073B000259B672B200C87131ED5800CB911AE9925
-:1073C000289285A78828968517D584C090A7BB29C1
-:1073D000B4CF871863FE3C008C60C0E0C091C0F061
-:1073E000C034C0B82A210428203C08AA110B8B0104
-:1073F000038301039F380B9B39C03208FF100388B9
-:1074000001089E380C881407EE100FEE0203880165
-:1074100008983905BF1029211F0ABB1107881008D9
-:10742000FF020BAA0218D57809291403AA022B21FE
-:107430002583200B2B1404BB110833110FBB020B47
-:1074400099028B148F2A0B33020833028B2B647042
-:10745000868868974D984C8769886A9341994697C2
-:107460004E984FC07077C701C0719A4718D5E30B8B
-:107470007C100CEC0208F802984418D5E00CBC0211
-:1074800008CC029C402A200C295CFEC0801FD54AF3
-:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
-:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
-:1074B0002524B1AA2A26156490DBC9D28F262E2254
-:1074C000090DFF082F26060FEE0C0E0E482E25255F
-:1074D0006550E4C020D10F00C07093419F4499468D
-:1074E0009A4777C70A1CD5362CC022C0810C873832
-:1074F0001CD5C40B781008E80208B8020C88029862
-:107500004063FF8000CC57DA20DB608C115BFDD636
-:10751000292102689806689403C020D10F2B221EEF
-:10752000C0A029221D2A25027B9901C0B064BFE8B2
-:1075300013D5212CB00728B000DA2003880A28824E
-:107540004CC0D10B8000DBA065AFE763FFCA000031
-:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
-:10756000D10FC16DC19D29252C60000429252CD681
-:10757000902624672F2468DA20DB308C11DD502E12
-:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
-:1075900063FFDD000000C8DF8C268B29ADCC9C2664
-:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
-:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
-:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
-:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
-:1075E000252463FF1FDA202B200C5BFE5663FF145B
-:1075F00012D5858220028257C82163FFFC12D581F3
-:1076000003E83004EE3005B13093209421952263D5
-:10761000FFFC000010D57D910092019302940311AC
-:10762000D554821001EA30A21101F031C04004E4C7
-:107630001600020011D5768210234A00032202921E
-:107640001011D540C021921004E4318403830282DA
-:1076500001810000D23001230000000010D56D919F
-:107660000092019302940311D543821001EA30A2E3
-:107670001101F131C04004E41600020011D564820A
-:107680001013D4E7032202921004E431840383022E
-:107690008201810000D330013300000010D55E91DB
-:1076A00000810165104981026510448103CF1F925A
-:1076B000019302940311D531821001EA30A2110125
-:1076C000F231C04004E41600020011D550821013BC
-:1076D000D4CF032202921004E43184038302820196
-:1076E000C010910391029101810000D43001430048
-:1076F00012D500C03028374028374428374828376B
-:107700004C233D017233ED03020063FFFC000000D7
-:1077100010D542910092019302940311D54082103A
-:10772000921011D4F28310032202921011D53D124F
-:10773000D5049210C04004E41600020011D5348232
-:107740001013D4EB032202921004E4318403830269
-:107750008201810000D53001530000006C10026EE0
-:10776000322FD620056F04043F04745B2A05440CB5
-:1077700000410400331A220A006D490D73630403AB
-:10778000660CB1220F2211031314736302222C0121
-:10779000D10FC83BD10F000073630CC021D10F0083
-:1077A0000000000044495630C020D10F6C10020088
-:1077B00040046B4C07032318020219D10F0203196E
-:1077C000C020D10F6C100202EA30D10F6C1002CC35
-:1077D0002503F03160000F006F220503F1316000D6
-:1077E000056F230503F231000200D10F6C1002CCAB
-:1077F0002502F030D10F00006F220402F130D10FCA
-:107800006F230402F230D10FC020D10F6C1002227E
-:107810000A20230A006D280E2837402837442837CD
-:107820004828374C233D01030200D10F6C1002029F
-:10783000E431D10F0A0000004368656C73696F2062
-:1078400046572044454255473D3020284275696CD3
-:1078500074204D6F6E204D61722020382031373AF0
-:1078600032383A3135205053542032303130206F85
-:107870006E20636C656F70617472612E61736963F1
-:1078800064657369676E6572732E636F6D3A2F68F6
-:107890006F6D652F66656C69782F772F66775F3718
-:1078A0002E392D6977617270292C205665727369A3
-:1078B0006F6E2054337878203030372E30612E3080
-:1078C00030202D20313030373061303010070A0041
-:0478D0000BDFE8756D
-:00000001FF
index dd1ed1b..a856e7f 100644 (file)
@@ -680,7 +680,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
 
        /* Allowed if owner and follower match. */
        inode = link->dentry->d_inode;
-       if (current_cred()->fsuid == inode->i_uid)
+       if (uid_eq(current_cred()->fsuid, inode->i_uid))
                return 0;
 
        /* Allowed if parent directory not sticky and world-writable. */
@@ -689,7 +689,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
                return 0;
 
        /* Allowed if parent directory and link owner match. */
-       if (parent->i_uid == inode->i_uid)
+       if (uid_eq(parent->i_uid, inode->i_uid))
                return 0;
 
        path_put_conditional(link, nd);
@@ -759,7 +759,7 @@ static int may_linkat(struct path *link)
        /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
         * otherwise, it must be a safe source.
         */
-       if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) ||
+       if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
            capable(CAP_FOWNER))
                return 0;
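
The three hunks above follow the kuid_t conversion: i_uid and fsuid are no longer plain integers, so direct '==' comparisons are replaced by uid_eq(). A minimal sketch of the same pattern, assuming only <linux/cred.h>, <linux/fs.h> and <linux/uidgid.h>:

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/uidgid.h>

/* true when the caller's filesystem uid owns the inode */
static bool example_caller_owns_inode(const struct inode *inode)
{
        return uid_eq(current_fsuid(), inode->i_uid);
}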
 
index 14cf9de..99dffab 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/cred.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
        memset(p, 0, sizeof(*p));
        mutex_init(&p->lock);
        p->op = op;
+#ifdef CONFIG_USER_NS
+       p->user_ns = file->f_cred->user_ns;
+#endif
 
        /*
         * Wrappers around seq_open(e.g. swaps_open) need to be
index fa21760..1f2c1c7 100644 (file)
@@ -195,6 +195,7 @@ header-y += in_route.h
 header-y += sock_diag.h
 header-y += inet_diag.h
 header-y += unix_diag.h
+header-y += packet_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
index d323a4b..6ba45d2 100644 (file)
 #define  BCMA_CC_CHIPST_4706_SFLASH_TYPE       BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
 #define  BCMA_CC_CHIPST_4706_MIPS_BENDIAN      BIT(3) /* 0: little, 1: big endian */
 #define  BCMA_CC_CHIPST_4706_PCIE1_DISABLE     BIT(5) /* PCIE1 enable strap pin */
+#define  BCMA_CC_CHIPST_5357_NAND_BOOT         BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
 #define BCMA_CC_JCMD                   0x0030          /* Rev >= 10 only */
 #define  BCMA_CC_JCMD_START            0x80000000
 #define  BCMA_CC_JCMD_BUSY             0x80000000
 #define  BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
 #define  BCMA_CC_SROM_CONTROL_SIZE_SHIFT       1
 #define  BCMA_CC_SROM_CONTROL_PRESENT  0x00000001
+/* Block 0x140 - 0x190 registers are chipset specific */
+#define BCMA_CC_4706_FLASHSCFG         0x18C           /* Flash struct configuration */
+#define  BCMA_CC_4706_FLASHSCFG_MASK   0x000000ff
+#define  BCMA_CC_4706_FLASHSCFG_SF1    0x00000001      /* 2nd serial flash present */
+#define  BCMA_CC_4706_FLASHSCFG_PF1    0x00000002      /* 2nd parallel flash present */
+#define  BCMA_CC_4706_FLASHSCFG_SF1_TYPE       0x00000004      /* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define  BCMA_CC_4706_FLASHSCFG_NF1    0x00000008      /* 2nd NAND flash present */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK     0x000000f0
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB      0x00000010      /* 4MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB      0x00000020      /* 8MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB     0x00000030      /* 16MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB     0x00000040      /* 32MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB     0x00000050      /* 64MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB    0x00000060      /* 128MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB    0x00000070      /* 256MB */
+/* NAND flash registers for BCM4706 (corerev = 31) */
+#define BCMA_CC_NFLASH_CTL             0x01A0
+#define  BCMA_CC_NFLASH_CTL_ERR                0x08000000
+#define BCMA_CC_NFLASH_CONF            0x01A4
+#define BCMA_CC_NFLASH_COL_ADDR                0x01A8
+#define BCMA_CC_NFLASH_ROW_ADDR                0x01AC
+#define BCMA_CC_NFLASH_DATA            0x01B0
+#define BCMA_CC_NFLASH_WAITCNT0                0x01B4
 /* 0x1E0 is defined as shared BCMA_CLKCTLST */
 #define BCMA_CC_HW_WORKAROUND          0x01E4 /* Hardware workaround (rev >= 20) */
 #define BCMA_CC_UART0_DATA             0x0300
 #define BCMA_CC_PLLCTL_ADDR            0x0660
 #define BCMA_CC_PLLCTL_DATA            0x0664
 #define BCMA_CC_SPROM                  0x0800 /* SPROM beginning */
+/* NAND flash MLC controller registers (corerev >= 38) */
+#define BCMA_CC_NAND_REVISION          0x0C00
+#define BCMA_CC_NAND_CMD_START         0x0C04
+#define BCMA_CC_NAND_CMD_ADDR_X                0x0C08
+#define BCMA_CC_NAND_CMD_ADDR          0x0C0C
+#define BCMA_CC_NAND_CMD_END_ADDR      0x0C10
+#define BCMA_CC_NAND_CS_NAND_SELECT    0x0C14
+#define BCMA_CC_NAND_CS_NAND_XOR       0x0C18
+#define BCMA_CC_NAND_SPARE_RD0         0x0C20
+#define BCMA_CC_NAND_SPARE_RD4         0x0C24
+#define BCMA_CC_NAND_SPARE_RD8         0x0C28
+#define BCMA_CC_NAND_SPARE_RD12                0x0C2C
+#define BCMA_CC_NAND_SPARE_WR0         0x0C30
+#define BCMA_CC_NAND_SPARE_WR4         0x0C34
+#define BCMA_CC_NAND_SPARE_WR8         0x0C38
+#define BCMA_CC_NAND_SPARE_WR12                0x0C3C
+#define BCMA_CC_NAND_ACC_CONTROL       0x0C40
+#define BCMA_CC_NAND_CONFIG            0x0C48
+#define BCMA_CC_NAND_TIMING_1          0x0C50
+#define BCMA_CC_NAND_TIMING_2          0x0C54
+#define BCMA_CC_NAND_SEMAPHORE         0x0C58
+#define BCMA_CC_NAND_DEVID             0x0C60
+#define BCMA_CC_NAND_DEVID_X           0x0C64
+#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68
+#define BCMA_CC_NAND_INTFC_STATUS      0x0C6C
+#define BCMA_CC_NAND_ECC_CORR_ADDR_X   0x0C70
+#define BCMA_CC_NAND_ECC_CORR_ADDR     0x0C74
+#define BCMA_CC_NAND_ECC_UNC_ADDR_X    0x0C78
+#define BCMA_CC_NAND_ECC_UNC_ADDR      0x0C7C
+#define BCMA_CC_NAND_READ_ERROR_COUNT  0x0C80
+#define BCMA_CC_NAND_CORR_STAT_THRESHOLD       0x0C84
+#define BCMA_CC_NAND_READ_ADDR_X       0x0C90
+#define BCMA_CC_NAND_READ_ADDR         0x0C94
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X       0x0C98
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C
+#define BCMA_CC_NAND_COPY_BACK_ADDR_X  0x0CA0
+#define BCMA_CC_NAND_COPY_BACK_ADDR    0x0CA4
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X        0x0CA8
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR  0x0CAC
+#define BCMA_CC_NAND_INV_READ_ADDR_X   0x0CB0
+#define BCMA_CC_NAND_INV_READ_ADDR     0x0CB4
+#define BCMA_CC_NAND_BLK_WR_PROTECT    0x0CC0
+#define BCMA_CC_NAND_ACC_CONTROL_CS1   0x0CD0
+#define BCMA_CC_NAND_CONFIG_CS1                0x0CD4
+#define BCMA_CC_NAND_TIMING_1_CS1      0x0CD8
+#define BCMA_CC_NAND_TIMING_2_CS1      0x0CDC
+#define BCMA_CC_NAND_SPARE_RD16                0x0D30
+#define BCMA_CC_NAND_SPARE_RD20                0x0D34
+#define BCMA_CC_NAND_SPARE_RD24                0x0D38
+#define BCMA_CC_NAND_SPARE_RD28                0x0D3C
+#define BCMA_CC_NAND_CACHE_ADDR                0x0D40
+#define BCMA_CC_NAND_CACHE_DATA                0x0D44
+#define BCMA_CC_NAND_CTRL_CONFIG       0x0D48
+#define BCMA_CC_NAND_CTRL_STATUS       0x0D4C
 
 /* Divider allocation in 4716/47162/5356 */
 #define BCMA_CC_PMU5_MAINPLL_CPU       1
 /* 4313 Chip specific ChipControl register bits */
 #define BCMA_CCTRL_4313_12MA_LED_DRIVE         0x00000007      /* 12 mA drive strengh for later 4313 */
 
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA                        BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3          BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH               BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE      BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE   BIT(19)
+
 /* Data for the PMU, if available.
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
  */
@@ -430,6 +515,26 @@ struct bcma_pflash {
        u32 window_size;
 };
 
+#ifdef CONFIG_BCMA_SFLASH
+struct bcma_sflash {
+       bool present;
+       u32 window;
+       u32 blocksize;
+       u16 numblocks;
+       u32 size;
+};
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+struct mtd_info;
+
+struct bcma_nflash {
+       bool present;
+
+       struct mtd_info *mtd;
+};
+#endif
+
 struct bcma_serial_port {
        void *regs;
        unsigned long clockspeed;
@@ -450,6 +555,12 @@ struct bcma_drv_cc {
        struct bcma_chipcommon_pmu pmu;
 #ifdef CONFIG_BCMA_DRIVER_MIPS
        struct bcma_pflash pflash;
+#ifdef CONFIG_BCMA_SFLASH
+       struct bcma_sflash sflash;
+#endif
+#ifdef CONFIG_BCMA_NFLASH
+       struct bcma_nflash nflash;
+#endif
 
        int nr_serial_ports;
        struct bcma_serial_port serial_ports[4];
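
A hedged sketch of how bus setup code might report the new flash descriptors once the ChipCommon driver has filled them in; 'bus' is a bcma_bus pointer, and the BCMA MIPS driver plus the SFLASH/NFLASH Kconfig options are assumed enabled:

#include <linux/bcma/bcma.h>
#include <linux/printk.h>

static void example_report_flash(struct bcma_bus *bus)
{
#ifdef CONFIG_BCMA_SFLASH
        if (bus->drv_cc.sflash.present)
                pr_info("serial flash: %u blocks x %u bytes\n",
                        bus->drv_cc.sflash.numblocks,
                        bus->drv_cc.sflash.blocksize);
#endif
#ifdef CONFIG_BCMA_NFLASH
        if (bus->drv_cc.nflash.present)
                pr_info("NAND flash controller present\n");
#endif
}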
index 5a71d57..6c9cb93 100644 (file)
 #define  BCMA_CLKCTLST_HAVEHTREQ       0x00000010 /* HT available request */
 #define  BCMA_CLKCTLST_HWCROFF         0x00000020 /* Force HW clock request off */
 #define  BCMA_CLKCTLST_EXTRESREQ       0x00000700 /* Mask of external resource requests */
+#define  BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
 #define  BCMA_CLKCTLST_HAVEALP         0x00010000 /* ALP available */
 #define  BCMA_CLKCTLST_HAVEHT          0x00020000 /* HT available */
 #define  BCMA_CLKCTLST_BP_ON_ALP       0x00040000 /* RO: running on ALP clock */
 #define  BCMA_CLKCTLST_BP_ON_HT                0x00080000 /* RO: running on HT clock */
 #define  BCMA_CLKCTLST_EXTRESST                0x07000000 /* Mask of external resource status */
+#define  BCMA_CLKCTLST_EXTRESST_SHIFT  24
 /* Is there any BCM4328 on BCMA bus? */
 #define  BCMA_CLKCTLST_4328A0_HAVEHT   0x00010000 /* 4328a0 has reversed bits */
 #define  BCMA_CLKCTLST_4328A0_HAVEALP  0x00020000 /* 4328a0 has reversed bits */
@@ -83,4 +85,6 @@
                                                         * (2 ZettaBytes), high 32 bits
                                                         */
 
+#define BCMA_SFLASH                    0x1c000000
+
 #endif /* LINUX_BCMA_REGS_H_ */
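
The new *_SHIFT constants pair with the existing EXTRESREQ/EXTRESST masks; a small sketch of the intended extraction pattern:

#include <linux/types.h>
#include <linux/bcma/bcma_regs.h>

/* pull the external-resource request field out of a BCMA_CLKCTLST value */
static inline u32 example_ext_resource_requests(u32 clkctlst)
{
        return (clkctlst & BCMA_CLKCTLST_EXTRESREQ) >>
               BCMA_CLKCTLST_EXTRESREQ_SHIFT;
}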
index 21eff41..fcb4f8e 100644 (file)
@@ -45,8 +45,10 @@ struct ethtool_cmd {
                                 * bits) in Mbps. Please use
                                 * ethtool_cmd_speed()/_set() to
                                 * access it */
-       __u8    eth_tp_mdix;
-       __u8    reserved2;
+       __u8    eth_tp_mdix;    /* twisted pair MDI-X status */
+       __u8    eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
+                                  * link should be renegotiated if necessary
+                                  */
        __u32   lp_advertising; /* Features the link partner advertises */
        __u32   reserved[2];
 };
@@ -1229,10 +1231,13 @@ struct ethtool_ops {
 #define AUTONEG_DISABLE                0x00
 #define AUTONEG_ENABLE         0x01
 
-/* Mode MDI or MDI-X */
-#define ETH_TP_MDI_INVALID     0x00
-#define ETH_TP_MDI             0x01
-#define ETH_TP_MDI_X           0x02
+/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then
+ * the driver is required to renegotiate link
+ */
+#define ETH_TP_MDI_INVALID     0x00 /* status: unknown; control: unsupported */
+#define ETH_TP_MDI             0x01 /* status: MDI;     control: force MDI */
+#define ETH_TP_MDI_X           0x02 /* status: MDI-X;   control: force MDI-X */
+#define ETH_TP_MDI_AUTO                0x03 /*                  control: auto-select */
 
 /* Wake-On-Lan options. */
 #define WAKE_PHY               (1 << 0)
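
A hedged sketch of the check a driver's set_settings path would need for the new eth_tp_mdix_ctrl field; real drivers additionally verify copper media and PHY support for forced MDI/MDI-X:

#include <linux/errno.h>
#include <linux/ethtool.h>

static int example_check_mdix_ctrl(const struct ethtool_cmd *ecmd)
{
        /* forcing MDI or MDI-X only makes sense when autoneg will rerun */
        if (ecmd->eth_tp_mdix_ctrl &&
            ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
            ecmd->autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        return 0;
}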
index b80506b..24df9e7 100644 (file)
@@ -67,4 +67,14 @@ static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
 {
        return hash_long((unsigned long)ptr, bits);
 }
+
+static inline u32 hash32_ptr(const void *ptr)
+{
+       unsigned long val = (unsigned long)ptr;
+
+#if BITS_PER_LONG == 64
+       val ^= (val >> 32);
+#endif
+       return (u32)val;
+}
 #endif /* _LINUX_HASH_H */
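
hash32_ptr() folds the upper half of a 64-bit pointer into the low 32 bits before mixing; the arp_hashfn() hunk further down uses it in place of dev->ifindex. The same pattern, as a minimal sketch:

#include <linux/hash.h>
#include <linux/netdevice.h>

static inline u32 example_neigh_hash(u32 key, const struct net_device *dev,
                                     u32 hash_rnd)
{
        /* the device pointer itself, not its ifindex, feeds the hash */
        return (key ^ hash32_ptr(dev)) * hash_rnd;
}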
index f0e69c6..9adcc29 100644 (file)
@@ -92,6 +92,7 @@
 #define ARPHRD_PHONET  820             /* PhoNet media type            */
 #define ARPHRD_PHONET_PIPE 821         /* PhoNet pipe header           */
 #define ARPHRD_CAIF    822             /* CAIF media type              */
+#define ARPHRD_IP6GRE  823             /* GRE over IPv6                */
 
 #define ARPHRD_VOID      0xFFFF        /* Void type, nothing is known */
 #define ARPHRD_NONE      0xFFFE        /* zero header length */
index aa2e167..6d88a7f 100644 (file)
@@ -67,6 +67,9 @@ struct team_port {
        struct netpoll *np;
 #endif
 
+       s32 priority; /* lower number ~ higher priority */
+       u16 queue_id;
+       struct list_head qom_list; /* node in queue override mapping list */
        long mode_priv[0];
 };
 
@@ -105,7 +108,7 @@ struct team_mode_ops {
        bool (*transmit)(struct team *team, struct sk_buff *skb);
        int (*port_enter)(struct team *team, struct team_port *port);
        void (*port_leave)(struct team *team, struct team_port *port);
-       void (*port_change_mac)(struct team *team, struct team_port *port);
+       void (*port_change_dev_addr)(struct team *team, struct team_port *port);
        void (*port_enabled)(struct team *team, struct team_port *port);
        void (*port_disabled)(struct team *team, struct team_port *port);
 };
@@ -115,6 +118,7 @@ enum team_option_type {
        TEAM_OPTION_TYPE_STRING,
        TEAM_OPTION_TYPE_BINARY,
        TEAM_OPTION_TYPE_BOOL,
+       TEAM_OPTION_TYPE_S32,
 };
 
 struct team_option_inst_info {
@@ -131,6 +135,7 @@ struct team_gsetter_ctx {
                        u32 len;
                } bin_val;
                bool bool_val;
+               s32 s32_val;
        } data;
        struct team_option_inst_info *info;
 };
@@ -182,6 +187,8 @@ struct team {
 
        const struct team_mode *mode;
        struct team_mode_ops ops;
+       bool queue_override_enabled;
+       struct list_head *qom_lists; /* array of queue override mapping lists */
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
@@ -231,7 +238,7 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
        return NULL;
 }
 
-extern int team_port_set_team_mac(struct team_port *port);
+extern int team_port_set_team_dev_addr(struct team_port *port);
 extern int team_options_register(struct team *team,
                                 const struct team_option *option,
                                 size_t option_count);
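
A hedged sketch of a per-port option using the new TEAM_OPTION_TYPE_S32 and the s32_val member added to team_gsetter_ctx; the ctx->info->port field and the team_option layout are assumed from the existing team API:

#include <linux/if_team.h>

static int example_priority_get(struct team *team,
                                struct team_gsetter_ctx *ctx)
{
        ctx->data.s32_val = ctx->info->port->priority;
        return 0;
}

static int example_priority_set(struct team *team,
                                struct team_gsetter_ctx *ctx)
{
        ctx->info->port->priority = ctx->data.s32_val;
        return 0;
}

static const struct team_option example_priority_option = {
        .name     = "priority",
        .type     = TEAM_OPTION_TYPE_S32,
        .per_port = true,
        .getter   = example_priority_get,
        .setter   = example_priority_set,
};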
index 5efff60..8c5035a 100644 (file)
@@ -75,6 +75,9 @@ enum {
        IFLA_GRE_TTL,
        IFLA_GRE_TOS,
        IFLA_GRE_PMTUDISC,
+       IFLA_GRE_ENCAP_LIMIT,
+       IFLA_GRE_FLOWINFO,
+       IFLA_GRE_FLAGS,
        __IFLA_GRE_MAX,
 };
 
index a810987..e6ff12d 100644 (file)
@@ -74,8 +74,6 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-struct vlan_info;
-
 static inline int is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
@@ -101,6 +99,8 @@ extern int vlan_vids_add_by_dev(struct net_device *dev,
                                const struct net_device *by_dev);
 extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
+
+extern bool vlan_uses_dev(const struct net_device *dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -151,6 +151,11 @@ static inline void vlan_vids_del_by_dev(struct net_device *dev,
                                        const struct net_device *by_dev)
 {
 }
+
+static inline bool vlan_uses_dev(const struct net_device *dev)
+{
+       return false;
+}
 #endif
 
 /**
index f1362b5..e788c18 100644 (file)
@@ -159,6 +159,7 @@ struct inet_diag_handler {
 struct inet_connection_sock;
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
+                             struct user_namespace *user_ns,
                              u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh);
 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
index 67f9dda..d032780 100644 (file)
@@ -104,9 +104,14 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_ANDCONF(in_dev, attr) \
        (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
         IN_DEV_CONF_GET((in_dev), attr))
-#define IN_DEV_ORCONF(in_dev, attr) \
-       (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \
+
+#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
+       (IPV4_DEVCONF_ALL(net, attr) || \
         IN_DEV_CONF_GET((in_dev), attr))
+
+#define IN_DEV_ORCONF(in_dev, attr) \
+       IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
+
 #define IN_DEV_MAXCONF(in_dev, attr) \
        (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
             IN_DEV_CONF_GET((in_dev), attr)))
@@ -133,6 +138,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
                                        IN_DEV_ORCONF((in_dev), \
                                                      PROMOTE_SECONDARIES)
 #define IN_DEV_ROUTE_LOCALNET(in_dev)  IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
+#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \
+       IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
 
 #define IN_DEV_RX_REDIRECTS(in_dev) \
        ((IN_DEV_FORWARD(in_dev) && \
index bf22b03..48af63c 100644 (file)
@@ -31,4 +31,21 @@ struct ip6_tnl_parm {
        struct in6_addr raddr;  /* remote tunnel end-point address */
 };
 
+struct ip6_tnl_parm2 {
+       char name[IFNAMSIZ];    /* name of tunnel device */
+       int link;               /* ifindex of underlying L2 interface */
+       __u8 proto;             /* tunnel protocol */
+       __u8 encap_limit;       /* encapsulation limit for tunnel */
+       __u8 hop_limit;         /* hop limit for tunnel */
+       __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
+       __u32 flags;            /* tunnel flags */
+       struct in6_addr laddr;  /* local tunnel end-point address */
+       struct in6_addr raddr;  /* remote tunnel end-point address */
+
+       __be16                  i_flags;
+       __be16                  o_flags;
+       __be32                  i_key;
+       __be32                  o_key;
+};
+
 #endif
index 8268054..05e3c2c 100644 (file)
@@ -312,7 +312,13 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
 extern unsigned long timeval_to_jiffies(const struct timeval *value);
 extern void jiffies_to_timeval(const unsigned long jiffies,
                               struct timeval *value);
+
 extern clock_t jiffies_to_clock_t(unsigned long x);
+static inline clock_t jiffies_delta_to_clock_t(long delta)
+{
+       return jiffies_to_clock_t(max(0L, delta));
+}
+
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
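
jiffies_delta_to_clock_t() clamps a possibly negative delta before converting, which is the usual pattern when exporting timer expiry times over diag or proc interfaces; a minimal sketch:

#include <linux/jiffies.h>

static inline clock_t example_timer_remaining(unsigned long expires)
{
        /* yields 0, not a huge value, once 'expires' lies in the past */
        return jiffies_delta_to_clock_t(expires - jiffies);
}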
index 7cccafe..6c40684 100644 (file)
@@ -377,5 +377,88 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
 extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
                          struct mii_ioctl_data *mii_data, int cmd);
 
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+       u32 supported = 0;
+
+       if (eee_cap & MDIO_EEE_100TX)
+               supported |= SUPPORTED_100baseT_Full;
+       if (eee_cap & MDIO_EEE_1000T)
+               supported |= SUPPORTED_1000baseT_Full;
+       if (eee_cap & MDIO_EEE_10GT)
+               supported |= SUPPORTED_10000baseT_Full;
+       if (eee_cap & MDIO_EEE_1000KX)
+               supported |= SUPPORTED_1000baseKX_Full;
+       if (eee_cap & MDIO_EEE_10GKX4)
+               supported |= SUPPORTED_10000baseKX4_Full;
+       if (eee_cap & MDIO_EEE_10GKR)
+               supported |= SUPPORTED_10000baseKR_Full;
+
+       return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+       u32 adv = 0;
+
+       if (eee_adv & MDIO_EEE_100TX)
+               adv |= ADVERTISED_100baseT_Full;
+       if (eee_adv & MDIO_EEE_1000T)
+               adv |= ADVERTISED_1000baseT_Full;
+       if (eee_adv & MDIO_EEE_10GT)
+               adv |= ADVERTISED_10000baseT_Full;
+       if (eee_adv & MDIO_EEE_1000KX)
+               adv |= ADVERTISED_1000baseKX_Full;
+       if (eee_adv & MDIO_EEE_10GKX4)
+               adv |= ADVERTISED_10000baseKX4_Full;
+       if (eee_adv & MDIO_EEE_10GKR)
+               adv |= ADVERTISED_10000baseKR_Full;
+
+       return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+       u16 reg = 0;
+
+       if (adv & ADVERTISED_100baseT_Full)
+               reg |= MDIO_EEE_100TX;
+       if (adv & ADVERTISED_1000baseT_Full)
+               reg |= MDIO_EEE_1000T;
+       if (adv & ADVERTISED_10000baseT_Full)
+               reg |= MDIO_EEE_10GT;
+       if (adv & ADVERTISED_1000baseKX_Full)
+               reg |= MDIO_EEE_1000KX;
+       if (adv & ADVERTISED_10000baseKX4_Full)
+               reg |= MDIO_EEE_10GKX4;
+       if (adv & ADVERTISED_10000baseKR_Full)
+               reg |= MDIO_EEE_10GKR;
+
+       return reg;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_MDIO_H__ */
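
A hedged sketch of a driver filling an ethtool EEE query from raw MMD register values with the new helpers; how the three registers are read (clause-45 access or indirect MMD reads) is left out:

#include <linux/ethtool.h>
#include <linux/mdio.h>

static void example_fill_eee(struct ethtool_eee *eee, u16 cap_3_20,
                             u16 adv_7_60, u16 lpa_7_61)
{
        eee->supported     = mmd_eee_cap_to_ethtool_sup_t(cap_3_20);
        eee->advertised    = mmd_eee_adv_to_ethtool_adv_t(adv_7_60);
        eee->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lpa_7_61);
}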
index 59dc05f..ccac82e 100644 (file)
@@ -1553,7 +1553,7 @@ struct packet_type {
 #define NETDEV_PRE_TYPE_CHANGE 0x000E
 #define NETDEV_POST_TYPE_CHANGE        0x000F
 #define NETDEV_POST_INIT       0x0010
-#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_UNREGISTER_FINAL 0x0011
 #define NETDEV_RELEASE         0x0012
 #define NETDEV_NOTIFY_PEERS    0x0013
 #define NETDEV_JOIN            0x0014
@@ -2227,6 +2227,7 @@ static inline void dev_hold(struct net_device *dev)
  * kind of lower layer not just hardware media.
  */
 
+extern void linkwatch_init_dev(struct net_device *dev);
 extern void linkwatch_fire_event(struct net_device *dev);
 extern void linkwatch_forget_dev(struct net_device *dev);
 
@@ -2249,8 +2250,6 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
-extern void netif_notify_peers(struct net_device *dev);
-
 /**
  *     netif_dormant_on - mark device as dormant.
  *     @dev: network device
@@ -2599,8 +2598,7 @@ extern void               __dev_set_rx_mode(struct net_device *dev);
 extern int             dev_set_promiscuity(struct net_device *dev, int inc);
 extern int             dev_set_allmulti(struct net_device *dev, int inc);
 extern void            netdev_state_change(struct net_device *dev);
-extern int             netdev_bonding_change(struct net_device *dev,
-                                             unsigned long event);
+extern void            netdev_notify_peers(struct net_device *dev);
 extern void            netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void            dev_load(struct net *net, const char *name);
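
netif_notify_peers() and netdev_bonding_change() are folded into a single netdev_notify_peers() helper; a hedged sketch of the typical caller, e.g. a virtual NIC resuming after migration:

#include <linux/netdevice.h>

static void example_announce_new_location(struct net_device *netdev)
{
        /* raises NETDEV_NOTIFY_PEERS so the stack can re-announce the address */
        netdev_notify_peers(netdev);
}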
index f74dd13..c9fdde2 100644 (file)
@@ -165,6 +165,7 @@ struct netlink_skb_parms {
        struct ucred            creds;          /* Skb credentials      */
        __u32                   pid;
        __u32                   dst_group;
+       struct sock             *ssk;
 };
 
 #define NETLINK_CB(skb)                (*(struct netlink_skb_parms*)&((skb)->cb))
index 2f38788..4584162 100644 (file)
  *     %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
  *     %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
  *
+ * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
+ *     its %NL80211_ATTR_WDEV identifier. It must have been created with
+ *     %NL80211_CMD_NEW_INTERFACE previously. After it has been started, the
+ *     P2P Device can be used for P2P operations, e.g. remain-on-channel and
+ *     public action frame TX.
+ * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by
+ *     its %NL80211_ATTR_WDEV identifier.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -708,6 +716,9 @@ enum nl80211_commands {
 
        NL80211_CMD_CH_SWITCH_NOTIFY,
 
+       NL80211_CMD_START_P2P_DEVICE,
+       NL80211_CMD_STOP_P2P_DEVICE,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1575,6 +1586,10 @@ enum nl80211_attrs {
  * @NL80211_IFTYPE_MESH_POINT: mesh point
  * @NL80211_IFTYPE_P2P_CLIENT: P2P client
  * @NL80211_IFTYPE_P2P_GO: P2P group owner
+ * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev
+ *     and therefore can't be created in the normal ways, use the
+ *     %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
+ *     commands to create and destroy one
  * @NL80211_IFTYPE_MAX: highest interface type number currently defined
  * @NUM_NL80211_IFTYPES: number of defined interface types
  *
@@ -1593,6 +1608,7 @@ enum nl80211_iftype {
        NL80211_IFTYPE_MESH_POINT,
        NL80211_IFTYPE_P2P_CLIENT,
        NL80211_IFTYPE_P2P_GO,
+       NL80211_IFTYPE_P2P_DEVICE,
 
        /* keep last */
        NUM_NL80211_IFTYPES,
@@ -2994,12 +3010,18 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *     to work properly to suppport receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
+ *     P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
+ *     in the interface combinations, even when it's only used for scan
+ *     and remain-on-channel. This could be due to, for example, the
+ *     remain-on-channel implementation requiring a channel context.
  */
 enum nl80211_feature_flags {
-       NL80211_FEATURE_SK_TX_STATUS    = 1 << 0,
-       NL80211_FEATURE_HT_IBSS         = 1 << 1,
-       NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
-       NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
+       NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
+       NL80211_FEATURE_HT_IBSS                         = 1 << 1,
+       NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
+       NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
 };
 
 /**
index 912c27a..6ef49b8 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 
+#ifdef CONFIG_OF
 extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
 extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
 extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -24,4 +25,36 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
+#else /* CONFIG_OF */
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+{
+       return -ENOSYS;
+}
+
+struct phy_device *of_phy_find_device(struct device_node *phy_np)
+{
+       return NULL;
+}
+
+struct phy_device *of_phy_connect(struct net_device *dev,
+                                        struct device_node *phy_np,
+                                        void (*hndlr)(struct net_device *),
+                                        u32 flags, phy_interface_t iface)
+{
+       return NULL;
+}
+
+struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
+                                        void (*hndlr)(struct net_device *),
+                                        phy_interface_t iface)
+{
+       return NULL;
+}
+
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 #endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/packet_diag.h b/include/linux/packet_diag.h
new file mode 100644 (file)
index 0000000..93f5fa9
--- /dev/null
@@ -0,0 +1,72 @@
+#ifndef __PACKET_DIAG_H__
+#define __PACKET_DIAG_H__
+
+#include <linux/types.h>
+
+struct packet_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u16   pad;
+       __u32   pdiag_ino;
+       __u32   pdiag_show;
+       __u32   pdiag_cookie[2];
+};
+
+#define PACKET_SHOW_INFO       0x00000001 /* Basic packet_sk information */
+#define PACKET_SHOW_MCLIST     0x00000002 /* A set of packet_diag_mclist-s */
+#define PACKET_SHOW_RING_CFG   0x00000004 /* Rings configuration parameters */
+#define PACKET_SHOW_FANOUT     0x00000008
+
+struct packet_diag_msg {
+       __u8    pdiag_family;
+       __u8    pdiag_type;
+       __u16   pdiag_num;
+
+       __u32   pdiag_ino;
+       __u32   pdiag_cookie[2];
+};
+
+enum {
+       PACKET_DIAG_INFO,
+       PACKET_DIAG_MCLIST,
+       PACKET_DIAG_RX_RING,
+       PACKET_DIAG_TX_RING,
+       PACKET_DIAG_FANOUT,
+
+       PACKET_DIAG_MAX,
+};
+
+struct packet_diag_info {
+       __u32   pdi_index;
+       __u32   pdi_version;
+       __u32   pdi_reserve;
+       __u32   pdi_copy_thresh;
+       __u32   pdi_tstamp;
+       __u32   pdi_flags;
+
+#define PDI_RUNNING    0x1
+#define PDI_AUXDATA    0x2
+#define PDI_ORIGDEV    0x4
+#define PDI_VNETHDR    0x8
+#define PDI_LOSS       0x10
+};
+
+struct packet_diag_mclist {
+       __u32   pdmc_index;
+       __u32   pdmc_count;
+       __u16   pdmc_type;
+       __u16   pdmc_alen;
+       __u8    pdmc_addr[MAX_ADDR_LEN];
+};
+
+struct packet_diag_ring {
+       __u32   pdr_block_size;
+       __u32   pdr_block_nr;
+       __u32   pdr_frame_size;
+       __u32   pdr_frame_nr;
+       __u32   pdr_retire_tmo;
+       __u32   pdr_sizeof_priv;
+       __u32   pdr_features;
+};
+
+#endif
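
The new UAPI is consumed from user space through the sock_diag netlink family; a hedged sketch of building the dump request (opening and using the NETLINK_SOCK_DIAG socket is omitted):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/packet_diag.h>

static void example_fill_packet_diag_req(struct nlmsghdr *nlh,
                                         struct packet_diag_req *req)
{
        memset(req, 0, sizeof(*req));
        req->sdiag_family = AF_PACKET;
        req->pdiag_show = PACKET_SHOW_INFO | PACKET_SHOW_MCLIST;

        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*req));
        nlh->nlmsg_type = SOCK_DIAG_BY_FAMILY;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
}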
index 6fdf027..0ec590b 100644 (file)
@@ -354,6 +354,37 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
 }
 #endif /* RFKILL || RFKILL_MODULE */
 
+
+#ifdef CONFIG_RFKILL_LEDS
+/**
+ * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
+ * This function might return a NULL pointer if registering of the
+ * LED trigger failed. Use this as "default_trigger" for the LED.
+ */
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_led_trigger_name -- set the LED trigger name
+ * @rfkill: rfkill struct
+ * @name: LED trigger name
+ *
+ * This function sets the LED trigger name of the radio LED
+ * trigger that rfkill creates. It is optional, but if called
+ * must be called before rfkill_register() to be effective.
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
+#else
+static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+       return NULL;
+}
+
+static inline void
+rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+}
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* RFKILL_H */
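
A hedged sketch of the documented ordering for the LED-trigger helpers: the trigger name must be set before registration, and the getter can later feed a platform LED's default_trigger:

#include <linux/rfkill.h>

static int example_register_with_led(struct rfkill *rfkill)
{
        /* only effective when called before rfkill_register() */
        rfkill_set_led_trigger_name(rfkill, "example-wlan-led");
        return rfkill_register(rfkill);
}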
index 83c44ee..68a04a3 100644 (file)
@@ -13,6 +13,7 @@ struct file;
 struct path;
 struct inode;
 struct dentry;
+struct user_namespace;
 
 struct seq_file {
        char *buf;
@@ -25,6 +26,9 @@ struct seq_file {
        struct mutex lock;
        const struct seq_operations *op;
        int poll_event;
+#ifdef CONFIG_USER_NS
+       struct user_namespace *user_ns;
+#endif
        void *private;
 };
 
@@ -128,6 +132,16 @@ int seq_put_decimal_ull(struct seq_file *m, char delimiter,
 int seq_put_decimal_ll(struct seq_file *m, char delimiter,
                        long long num);
 
+static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
+{
+#ifdef CONFIG_USER_NS
+       return seq->user_ns;
+#else
+       extern struct user_namespace init_user_ns;
+       return &init_user_ns;
+#endif
+}
+
 #define SEQ_START_TOKEN ((void *)1)
 /*
  * Helpers for iteration over list_head-s in seq_files
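
With the opener's user namespace stored in the seq_file, show routines can print uids relative to that namespace; a minimal sketch, assuming from_kuid_munged() from <linux/uidgid.h>:

#include <linux/seq_file.h>
#include <linux/uidgid.h>

static void example_show_owner(struct seq_file *seq, kuid_t owner)
{
        /* translate the kernel uid into the opener's user namespace */
        seq_printf(seq, "uid=%u\n",
                   from_kuid_munged(seq_user_ns(seq), owner));
}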
index 7632c87..b33a3a1 100644 (file)
@@ -846,13 +846,16 @@ static inline int skb_shared(const struct sk_buff *skb)
  *
  *     NULL is returned on a memory allocation failure.
  */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-                                             gfp_t pri)
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 {
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);
-               kfree_skb(skb);
+
+               if (likely(nskb))
+                       consume_skb(skb);
+               else
+                       kfree_skb(skb);
                skb = nskb;
        }
        return skb;
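
The behavioural change is accounting only: when the clone succeeds, the shared skb is released with consume_skb() instead of kfree_skb(), so it is not counted as a drop. Callers are unchanged; a minimal rx-path sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_rcv(struct sk_buff *skb)
{
        /* take a private copy if someone else still holds a reference */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return NET_RX_DROP;

        /* ... process skb ... */
        return NET_RX_SUCCESS;
}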
index 00bc189..ad6e3a6 100644 (file)
 enum
 {
        IPSTATS_MIB_NUM = 0,
+/* frequently written fields in fast path, kept in same cache line */
        IPSTATS_MIB_INPKTS,                     /* InReceives */
+       IPSTATS_MIB_INOCTETS,                   /* InOctets */
+       IPSTATS_MIB_INDELIVERS,                 /* InDelivers */
+       IPSTATS_MIB_OUTFORWDATAGRAMS,           /* OutForwDatagrams */
+       IPSTATS_MIB_OUTPKTS,                    /* OutRequests */
+       IPSTATS_MIB_OUTOCTETS,                  /* OutOctets */
+/* other fields */
        IPSTATS_MIB_INHDRERRORS,                /* InHdrErrors */
        IPSTATS_MIB_INTOOBIGERRORS,             /* InTooBigErrors */
        IPSTATS_MIB_INNOROUTES,                 /* InNoRoutes */
@@ -26,9 +33,6 @@ enum
        IPSTATS_MIB_INUNKNOWNPROTOS,            /* InUnknownProtos */
        IPSTATS_MIB_INTRUNCATEDPKTS,            /* InTruncatedPkts */
        IPSTATS_MIB_INDISCARDS,                 /* InDiscards */
-       IPSTATS_MIB_INDELIVERS,                 /* InDelivers */
-       IPSTATS_MIB_OUTFORWDATAGRAMS,           /* OutForwDatagrams */
-       IPSTATS_MIB_OUTPKTS,                    /* OutRequests */
        IPSTATS_MIB_OUTDISCARDS,                /* OutDiscards */
        IPSTATS_MIB_OUTNOROUTES,                /* OutNoRoutes */
        IPSTATS_MIB_REASMTIMEOUT,               /* ReasmTimeout */
@@ -42,8 +46,6 @@ enum
        IPSTATS_MIB_OUTMCASTPKTS,               /* OutMcastPkts */
        IPSTATS_MIB_INBCASTPKTS,                /* InBcastPkts */
        IPSTATS_MIB_OUTBCASTPKTS,               /* OutBcastPkts */
-       IPSTATS_MIB_INOCTETS,                   /* InOctets */
-       IPSTATS_MIB_OUTOCTETS,                  /* OutOctets */
        IPSTATS_MIB_INMCASTOCTETS,              /* InMcastOctets */
        IPSTATS_MIB_OUTMCASTOCTETS,             /* OutMcastOctets */
        IPSTATS_MIB_INBCASTOCTETS,              /* InBcastOctets */
index 1a6b004..c2b02a5 100644 (file)
 #define SSB_CHIPCO_FLASHCTL_ST_SE      0x02D8          /* Sector Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_BE      0x00C7          /* Bulk Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_DP      0x00B9          /* Deep Power-down */
-#define SSB_CHIPCO_FLASHCTL_ST_RSIG    0x03AB          /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_RES     0x03AB          /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_CSA     0x1000          /* Keep chip select asserted */
+#define SSB_CHIPCO_FLASHCTL_ST_SSE     0x0220          /* Sub-sector Erase */
 
 /* Status register bits for ST flashes */
 #define SSB_CHIPCO_FLASHSTA_ST_WIP     0x01            /* Write In Progress */
index c989284..0b1e3f2 100644 (file)
@@ -89,8 +89,8 @@
 
 #define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
 #define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* obsoleted */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_NODES     0x4009    /* obsoleted */
 #define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
 #define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* obsoleted */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_NODES     0x8009    /* obsoleted */
index 7f7df93..b630dae 100644 (file)
@@ -3,6 +3,7 @@
 #define _ARP_H
 
 #include <linux/if_arp.h>
+#include <linux/hash.h>
 #include <net/neighbour.h>
 
 
@@ -10,7 +11,7 @@ extern struct neigh_table arp_tbl;
 
 static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
 {
-       u32 val = key ^ dev->ifindex;
+       u32 val = key ^ hash32_ptr(dev);
 
        return val * hash_rnd;
 }
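hash32_ptr() comes from the linux/hash.h include added above; it folds the net_device pointer down to 32 bits so the hash mixes in more entropy than the small, predictable ifindex did. Roughly, as a sketch rather than the authoritative definition:

#include <linux/types.h>

static inline u32 my_hash32_ptr(const void *ptr)	/* ~ hash32_ptr() */
{
	unsigned long val = (unsigned long)ptr;

#if BITS_PER_LONG == 64
	val ^= val >> 32;	/* fold the upper half into the lower */
#endif
	return (u32)val;
}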
index 5d23521..53539ac 100644 (file)
@@ -157,7 +157,7 @@ enum {
 typedef struct ax25_uid_assoc {
        struct hlist_node       uid_node;
        atomic_t                refcount;
-       uid_t                   uid;
+       kuid_t                  uid;
        ax25_address            call;
 } ax25_uid_assoc;
 
@@ -434,7 +434,7 @@ extern unsigned long ax25_display_timer(struct timer_list *);
 
 /* ax25_uid.c */
 extern int  ax25_uid_policy;
-extern ax25_uid_assoc *ax25_findbyuid(uid_t);
+extern ax25_uid_assoc *ax25_findbyuid(kuid_t);
 extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
 extern const struct file_operations ax25_uid_fops;
 extern void ax25_uid_free(void);
index 565d4be..ede0369 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/poll.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #ifndef AF_BLUETOOTH
 #define AF_BLUETOOTH   31
@@ -202,6 +203,10 @@ enum {
 struct bt_sock_list {
        struct hlist_head head;
        rwlock_t          lock;
+#ifdef CONFIG_PROC_FS
+        struct file_operations   fops;
+        int (* custom_seq_show)(struct seq_file *, void *);
+#endif
 };
 
 int  bt_sock_register(int proto, const struct net_proto_family *ops);
@@ -292,6 +297,11 @@ extern void hci_sock_cleanup(void);
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);
 
+extern int  bt_procfs_init(struct module* module, struct net *net, const char *name,
+                          struct bt_sock_list* sk_list,
+                          int (* seq_show)(struct seq_file *, void *));
+extern void bt_procfs_cleanup(struct net *net, const char *name);
+
 extern struct dentry *bt_debugfs;
 
 int l2cap_init(void);
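A hedged sketch of how a Bluetooth protocol module might use the new helpers to expose its socket list under /proc/net; raw_sk_list, raw_seq_show and the "raw" entry name are illustrative, not taken from this patch.

#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>

static struct bt_sock_list raw_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock),
};

/* One line of output per tracked socket; body elided for brevity. */
static int raw_seq_show(struct seq_file *seq, void *v)
{
	return 0;
}

static int __init raw_sock_init(void)
{
	return bt_procfs_init(THIS_MODULE, &init_net, "raw",
			      &raw_sk_list, raw_seq_show);
}

static void __exit raw_sock_exit(void)
{
	bt_procfs_cleanup(&init_net, "raw");
}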
index ccd723e..23cf413 100644 (file)
 /* First BR/EDR Controller shall have ID = 0 */
 #define HCI_BREDR_ID   0
 
+/* AMP controller status */
+#define AMP_CTRL_POWERED_DOWN                  0x00
+#define AMP_CTRL_BLUETOOTH_ONLY                        0x01
+#define AMP_CTRL_NO_CAPACITY                   0x02
+#define AMP_CTRL_LOW_CAPACITY                  0x03
+#define AMP_CTRL_MEDIUM_CAPACITY               0x04
+#define AMP_CTRL_HIGH_CAPACITY                 0x05
+#define AMP_CTRL_FULL_CAPACITY                 0x06
+
 /* HCI device quirks */
 enum {
        HCI_QUIRK_RESET_ON_CLOSE,
@@ -1295,6 +1304,8 @@ struct hci_ev_num_comp_blocks {
 } __packed;
 
 /* Low energy meta events */
+#define LE_CONN_ROLE_MASTER    0x00
+
 #define HCI_EV_LE_CONN_COMPLETE                0x01
 struct hci_ev_le_conn_complete {
        __u8     status;
index 475b8c0..41d9439 100644 (file)
@@ -115,12 +115,6 @@ struct oob_data {
        u8 randomizer[16];
 };
 
-struct adv_entry {
-       struct list_head list;
-       bdaddr_t bdaddr;
-       u8 bdaddr_type;
-};
-
 struct le_scan_params {
        u8 type;
        u16 interval;
@@ -356,16 +350,16 @@ extern rwlock_t hci_cb_list_lock;
 
 /* ----- HCI interface to upper protocols ----- */
 extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
 extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
 extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
 extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
                              u16 flags);
 
 extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
 extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
@@ -587,8 +581,7 @@ void hci_conn_put_device(struct hci_conn *conn);
 
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
-       BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-              atomic_read(&conn->refcnt) + 1);
+       BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
        atomic_inc(&conn->refcnt);
        cancel_delayed_work(&conn->disc_work);
@@ -596,8 +589,7 @@ static inline void hci_conn_hold(struct hci_conn *conn)
 
 static inline void hci_conn_put(struct hci_conn *conn)
 {
-       BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-              atomic_read(&conn->refcnt) - 1);
+       BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
@@ -1056,7 +1048,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_interleaved_discovery(struct hci_dev *hdev);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-
+bool mgmt_valid_hdev(struct hci_dev *hdev);
 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
 
 /* HCI info for socket */
index a7679f8..d206296 100644 (file)
@@ -671,20 +671,8 @@ enum {
        L2CAP_EV_RECV_FRAME,
 };
 
-static inline void l2cap_chan_hold(struct l2cap_chan *c)
-{
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-       atomic_inc(&c->refcnt);
-}
-
-static inline void l2cap_chan_put(struct l2cap_chan *c)
-{
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-       if (atomic_dec_and_test(&c->refcnt))
-               kfree(c);
-}
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
 {
@@ -771,7 +759,6 @@ int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid);
 
 struct l2cap_chan *l2cap_chan_create(void);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
-void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                       bdaddr_t *dst, u8 dst_type);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
index ca356a7..50993a5 100644 (file)
@@ -108,8 +108,8 @@ struct smp_cmd_security_req {
 #define SMP_CONFIRM_FAILED             0x04
 #define SMP_PAIRING_NOTSUPP            0x05
 #define SMP_ENC_KEY_SIZE               0x06
-#define SMP_CMD_NOTSUPP                0x07
-#define SMP_UNSPECIFIED                0x08
+#define SMP_CMD_NOTSUPP                        0x07
+#define SMP_UNSPECIFIED                        0x08
 #define SMP_REPEATED_ATTEMPTS          0x09
 
 #define SMP_MIN_ENC_KEY_SIZE           7
@@ -123,8 +123,8 @@ struct smp_chan {
        struct l2cap_conn *conn;
        u8              preq[7]; /* SMP Pairing Request */
        u8              prsp[7]; /* SMP Pairing Response */
-       u8              prnd[16]; /* SMP Pairing Random (local) */
-       u8              rrnd[16]; /* SMP Pairing Random (remote) */
+       u8              prnd[16]; /* SMP Pairing Random (local) */
+       u8              rrnd[16]; /* SMP Pairing Random (remote) */
        u8              pcnf[16]; /* SMP Pairing Confirm */
        u8              tk[16]; /* SMP Temporary Key */
        u8              enc_key_size;
index 3d254e1..ba2e616 100644 (file)
@@ -1439,7 +1439,8 @@ struct cfg80211_gtk_rekey_data {
  * @add_virtual_intf: create a new virtual interface with the given name,
  *     must set the struct wireless_dev's iftype. Beware: You must create
  *     the new netdev in the wiphy's network namespace! Returns the struct
- *     wireless_dev, or an ERR_PTR.
+ *     wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
+ *     also set the address member in the wdev.
  *
  * @del_virtual_intf: remove the virtual interface
  *
@@ -1618,6 +1619,9 @@ struct cfg80211_gtk_rekey_data {
  * @get_channel: Get the current operating channel for the virtual interface.
  *     For monitor interfaces, it should return %NULL unless there's a single
  *     current monitoring channel.
+ *
+ * @start_p2p_device: Start the given P2P device.
+ * @stop_p2p_device: Stop the given P2P device.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1834,6 +1838,11 @@ struct cfg80211_ops {
                (*get_channel)(struct wiphy *wiphy,
                               struct wireless_dev *wdev,
                               enum nl80211_channel_type *type);
+
+       int     (*start_p2p_device)(struct wiphy *wiphy,
+                                   struct wireless_dev *wdev);
+       void    (*stop_p2p_device)(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev);
 };
 
 /*
@@ -2397,6 +2406,8 @@ struct cfg80211_cached_keys;
  * @cleanup_work: work struct used for cleanup that can't be done directly
  * @beacon_interval: beacon interval used on this device for transmitting
  *     beacons, 0 when not valid
+ * @address: The address for this device, valid only if @netdev is %NULL
+ * @p2p_started: true if this is a P2P Device that has been started
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -2415,7 +2426,9 @@ struct wireless_dev {
 
        struct work_struct cleanup_work;
 
-       bool use_4addr;
+       bool use_4addr, p2p_started;
+
+       u8 address[ETH_ALEN] __aligned(sizeof(u16));
 
        /* currently used for IBSS and SME - might be rearranged later */
        u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -2463,6 +2476,13 @@ struct wireless_dev {
 #endif
 };
 
+static inline u8 *wdev_address(struct wireless_dev *wdev)
+{
+       if (wdev->netdev)
+               return wdev->netdev->dev_addr;
+       return wdev->address;
+}
+
 /**
  * wdev_priv - return wiphy priv from wireless_dev
  *
@@ -3530,6 +3550,22 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
  */
 u32 cfg80211_calculate_bitrate(struct rate_info *rate);
 
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * Call this function only for wdevs that have no netdev assigned,
+ * e.g. P2P Devices. It removes the device from the list so that
+ * it can no longer be used. It is necessary to call this function
+ * even when cfg80211 requests the removal of the interface by
+ * calling the del_virtual_intf() callback. The function must also
+ * be called when the driver wishes to unregister the wdev, e.g.
+ * when the device is unbound from the driver.
+ *
+ * Requires the RTNL to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
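A hedged sketch tying the new pieces of this file together from a driver's point of view: the two new ops power the P2P Device on and off, and a netdev-less wdev has to be unregistered by the driver itself when it goes away. All my_* names are illustrative.

#include <net/cfg80211.h>

static int my_start_p2p(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	/* bring the P2P Device up in firmware, using wdev->address */
	return 0;
}

static void my_stop_p2p(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	/* power the P2P Device back down */
}

static const struct cfg80211_ops my_cfg_ops = {
	.start_p2p_device	= my_start_p2p,
	.stop_p2p_device	= my_stop_p2p,
	/* remaining ops elided */
};

/* On unbind, a wdev without a netdev (e.g. the P2P Device) must be
 * removed explicitly, with the RTNL held:
 *
 *	rtnl_lock();
 *	cfg80211_unregister_wdev(wdev);
 *	rtnl_unlock();
 */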
index 621e351..9a78810 100644 (file)
@@ -396,11 +396,15 @@ static inline void dst_confirm(struct dst_entry *dst)
 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
                                   struct sk_buff *skb)
 {
-       struct hh_cache *hh;
+       const struct hh_cache *hh;
+
+       if (dst->pending_confirm) {
+               unsigned long now = jiffies;
 
-       if (unlikely(dst->pending_confirm)) {
-               n->confirmed = jiffies;
                dst->pending_confirm = 0;
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
        }
 
        hh = &n->hh;
index 7139254..7f0df13 100644 (file)
@@ -183,6 +183,9 @@ struct ieee80211_radiotap_header {
  *     Contains a bitmap of known fields/flags, the flags, and
  *     the MCS index.
  *
+ * IEEE80211_RADIOTAP_AMPDU_STATUS     u32, u16, u8, u8        unitless
+ *
+ *     Contains the AMPDU information for the subframe.
  */
 enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_TSFT = 0,
@@ -205,6 +208,7 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_DATA_RETRIES = 17,
 
        IEEE80211_RADIOTAP_MCS = 19,
+       IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
 
        /* valid in every it_present bitmap, even vendor namespaces */
        IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -270,6 +274,13 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
 
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN                0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN            0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN            0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST               0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR         0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN       0x0020
 
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
index 358fb86..e03047f 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/netdevice.h>
 #include <linux/ip6_tunnel.h>
 
+#define IP6TUNNEL_ERR_TIMEO (30*HZ)
+
 /* capable of sending packets */
 #define IP6_TNL_F_CAP_XMIT 0x10000
 /* capable of receiving packets */
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
-/* IPv6 tunnel */
+struct __ip6_tnl_parm {
+       char name[IFNAMSIZ];    /* name of tunnel device */
+       int link;               /* ifindex of underlying L2 interface */
+       __u8 proto;             /* tunnel protocol */
+       __u8 encap_limit;       /* encapsulation limit for tunnel */
+       __u8 hop_limit;         /* hop limit for tunnel */
+       __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
+       __u32 flags;            /* tunnel flags */
+       struct in6_addr laddr;  /* local tunnel end-point address */
+       struct in6_addr raddr;  /* remote tunnel end-point address */
+
+       __be16                  i_flags;
+       __be16                  o_flags;
+       __be32                  i_key;
+       __be32                  o_key;
+};
 
+/* IPv6 tunnel */
 struct ip6_tnl {
        struct ip6_tnl __rcu *next;     /* next tunnel in list */
        struct net_device *dev; /* virtual device associated with tunnel */
-       struct ip6_tnl_parm parms;      /* tunnel configuration parameters */
+       struct __ip6_tnl_parm parms;    /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
        struct dst_entry *dst_cache;    /* cached dst */
        u32 dst_cookie;
+
+       int err_count;
+       unsigned long err_time;
+
+       /* These fields used only by GRE */
+       __u32 i_seqno;  /* The last seen seqno  */
+       __u32 o_seqno;  /* The last output seqno */
+       int hlen;       /* Precalculated GRE header length */
+       int mlink;
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -31,4 +58,14 @@ struct ipv6_tlv_tnl_enc_lim {
        __u8 encap_limit;       /* tunnel encapsulation limit   */
 } __packed;
 
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+void ip6_tnl_dst_reset(struct ip6_tnl *t);
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+               const struct in6_addr *raddr);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
+                            const struct in6_addr *raddr);
+
 #endif
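A hedged sketch of filling the widened parameter block; the GRE-only members at the end suggest the structure is now shared with an IPv6 GRE tunnel implementation. The protocol, limits and addresses below are placeholders, not values from this patch.

#include <linux/in.h>
#include <linux/string.h>
#include <net/ip6_tunnel.h>

static void my_fill_parms(struct __ip6_tnl_parm *p,
			  const struct in6_addr *local,
			  const struct in6_addr *remote)
{
	memset(p, 0, sizeof(*p));
	p->proto = IPPROTO_GRE;		/* placeholder tunnel protocol */
	p->hop_limit = 64;
	p->encap_limit = 4;		/* default encapsulation limit */
	p->laddr = *local;
	p->raddr = *remote;
}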
index 95374d1..ee75ccd 100644 (file)
@@ -808,8 +808,6 @@ struct netns_ipvs {
        struct list_head        rs_table[IP_VS_RTAB_SIZE];
        /* ip_vs_app */
        struct list_head        app_list;
-       /* ip_vs_ftp */
-       struct ip_vs_app        *ftp_app;
        /* ip_vs_proto */
        #define IP_VS_PROTO_TAB_SIZE    32      /* must be power of 2 */
        struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
@@ -890,6 +888,7 @@ struct netns_ipvs {
        unsigned int            sysctl_sync_refresh_period;
        int                     sysctl_sync_retries;
        int                     sysctl_nat_icmp_send;
+       int                     sysctl_pmtu_disc;
 
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
@@ -976,6 +975,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
        return ipvs->sysctl_sync_sock_size;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_pmtu_disc;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1018,6 +1022,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
        return 0;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+       return 1;
+}
+
 #endif
 
 /*
@@ -1179,7 +1188,8 @@ extern void ip_vs_service_net_cleanup(struct net *net);
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern struct ip_vs_app *register_ip_vs_app(struct net *net,
+                                           struct ip_vs_app *app);
 extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
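A hedged sketch of the calling convention implied by the new prototype: the caller now gets the registered struct ip_vs_app back, presumably as an ERR_PTR() on failure (an assumption, the error convention is not visible in this hunk), instead of stashing its own per-net copy, which is also why the ftp_app pointer could be dropped from struct netns_ipvs above. my_app and the pernet hooks are illustrative.

#include <linux/err.h>
#include <net/ip_vs.h>
#include <net/net_namespace.h>

static struct ip_vs_app my_app = {
	.name = "my_app",	/* illustrative application helper */
};

static int __net_init my_app_net_init(struct net *net)
{
	struct ip_vs_app *app;

	app = register_ip_vs_app(net, &my_app);
	if (IS_ERR(app))
		return PTR_ERR(app);	/* assumed error convention */
	return 0;
}

static void __net_exit my_app_net_exit(struct net *net)
{
	unregister_ip_vs_app(net, &my_app);
}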
index 01c34b3..9bed5d4 100644 (file)
@@ -34,6 +34,7 @@
 #define NEXTHDR_IPV6           41      /* IPv6 in IPv6 */
 #define NEXTHDR_ROUTING                43      /* Routing header. */
 #define NEXTHDR_FRAGMENT       44      /* Fragmentation/reassembly header. */
+#define NEXTHDR_GRE            47      /* GRE header. */
 #define NEXTHDR_ESP            50      /* Encapsulating security payload. */
 #define NEXTHDR_AUTH           51      /* Authentication header. */
 #define NEXTHDR_ICMP           58      /* ICMP for IPv6. */
@@ -222,7 +223,10 @@ struct ip6_flowlabel {
        struct ipv6_txoptions   *opt;
        unsigned long           linger;
        u8                      share;
-       u32                     owner;
+       union {
+               struct pid *pid;
+               kuid_t uid;
+       } owner;
        unsigned long           lastuse;
        unsigned long           expires;
        struct net              *fl_net;
index bb86aa6..71f8262 100644 (file)
@@ -171,6 +171,7 @@ struct ieee80211_low_level_stats {
  * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
  * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
+ * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -190,6 +191,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_IDLE                = 1<<14,
        BSS_CHANGED_SSID                = 1<<15,
        BSS_CHANGED_AP_PROBE_RESP       = 1<<16,
+       BSS_CHANGED_PS                  = 1<<17,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -266,6 +268,8 @@ enum ieee80211_rssi_event {
  * @idle: This interface is idle. There's also a global idle flag in the
  *     hardware config which may be more appropriate depending on what
  *     your driver/device needs to do.
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ *     offchannel/dynamic_ps operations.
  * @ssid: The SSID of the current vif. Only valid in AP-mode.
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
@@ -296,6 +300,7 @@ struct ieee80211_bss_conf {
        bool arp_filter_enabled;
        bool qos;
        bool idle;
+       bool ps;
        u8 ssid[IEEE80211_MAX_SSID_LEN];
        size_t ssid_len;
        bool hidden_ssid;
@@ -522,9 +527,6 @@ struct ieee80211_tx_rate {
  *  (2) driver internal use (if applicable)
  *  (3) TX status information - driver tells mac80211 what happened
  *
- * The TX control's sta pointer is only valid during the ->tx call,
- * it may be NULL.
- *
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
  * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
@@ -555,6 +557,7 @@ struct ieee80211_tx_info {
                                        struct ieee80211_tx_rate rates[
                                                IEEE80211_TX_MAX_RATES];
                                        s8 rts_cts_rate_idx;
+                                       /* 3 bytes free */
                                };
                                /* only needed before rate control */
                                unsigned long jiffies;
@@ -562,7 +565,7 @@ struct ieee80211_tx_info {
                        /* NB: vif can be NULL for injected frames */
                        struct ieee80211_vif *vif;
                        struct ieee80211_key_conf *hw_key;
-                       struct ieee80211_sta *sta;
+                       /* 8 bytes free */
                } control;
                struct {
                        struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -673,21 +676,41 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
  *     the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
  *     to hw.radiotap_mcs_details to advertise that fact
+ * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
+ *     number (@ampdu_reference) must be populated and be a distinct number for
+ *     each A-MPDU
+ * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
+ * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
+ *     monitoring purposes only
+ * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
+ *     subframes of a single A-MPDU
+ * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
+ * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
+ *     on this subframe
+ * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
+ *     is stored in the @ampdu_delimiter_crc field)
  */
 enum mac80211_rx_flags {
-       RX_FLAG_MMIC_ERROR      = 1<<0,
-       RX_FLAG_DECRYPTED       = 1<<1,
-       RX_FLAG_MMIC_STRIPPED   = 1<<3,
-       RX_FLAG_IV_STRIPPED     = 1<<4,
-       RX_FLAG_FAILED_FCS_CRC  = 1<<5,
-       RX_FLAG_FAILED_PLCP_CRC = 1<<6,
-       RX_FLAG_MACTIME_MPDU    = 1<<7,
-       RX_FLAG_SHORTPRE        = 1<<8,
-       RX_FLAG_HT              = 1<<9,
-       RX_FLAG_40MHZ           = 1<<10,
-       RX_FLAG_SHORT_GI        = 1<<11,
-       RX_FLAG_NO_SIGNAL_VAL   = 1<<12,
-       RX_FLAG_HT_GF           = 1<<13,
+       RX_FLAG_MMIC_ERROR              = BIT(0),
+       RX_FLAG_DECRYPTED               = BIT(1),
+       RX_FLAG_MMIC_STRIPPED           = BIT(3),
+       RX_FLAG_IV_STRIPPED             = BIT(4),
+       RX_FLAG_FAILED_FCS_CRC          = BIT(5),
+       RX_FLAG_FAILED_PLCP_CRC         = BIT(6),
+       RX_FLAG_MACTIME_MPDU            = BIT(7),
+       RX_FLAG_SHORTPRE                = BIT(8),
+       RX_FLAG_HT                      = BIT(9),
+       RX_FLAG_40MHZ                   = BIT(10),
+       RX_FLAG_SHORT_GI                = BIT(11),
+       RX_FLAG_NO_SIGNAL_VAL           = BIT(12),
+       RX_FLAG_HT_GF                   = BIT(13),
+       RX_FLAG_AMPDU_DETAILS           = BIT(14),
+       RX_FLAG_AMPDU_REPORT_ZEROLEN    = BIT(15),
+       RX_FLAG_AMPDU_IS_ZEROLEN        = BIT(16),
+       RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
+       RX_FLAG_AMPDU_IS_LAST           = BIT(18),
+       RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
+       RX_FLAG_AMPDU_DELIM_CRC_KNOWN   = BIT(20),
 };
 
 /**
@@ -711,17 +734,22 @@ enum mac80211_rx_flags {
  *     HT rates are use (RX_FLAG_HT)
  * @flag: %RX_FLAG_*
  * @rx_flags: internal RX flags for mac80211
+ * @ampdu_reference: A-MPDU reference number, must be a different value for
+ *     each A-MPDU but the same for each subframe within one A-MPDU
+ * @ampdu_delimiter_crc: A-MPDU delimiter CRC
  */
 struct ieee80211_rx_status {
        u64 mactime;
        u32 device_timestamp;
-       u16 flag;
+       u32 ampdu_reference;
+       u32 flag;
        u16 freq;
        u8 rate_idx;
        u8 rx_flags;
        u8 band;
        u8 antenna;
        s8 signal;
+       u8 ampdu_delimiter_crc;
 };
 
 /**
@@ -1074,6 +1102,16 @@ enum sta_notify_cmd {
 };
 
 /**
+ * struct ieee80211_tx_control - TX control data
+ *
+ * @sta: station table entry; this sta pointer may be NULL and
+ *     must not be copied past the call, since it is RCU-protected.
+ */
+struct ieee80211_tx_control {
+       struct ieee80211_sta *sta;
+};
+
+/**
  * enum ieee80211_hw_flags - hardware flags
  *
  * These flags are used to indicate hardware capabilities to
@@ -1203,6 +1241,10 @@ enum sta_notify_cmd {
  *     queue mapping in order to use different queues (not just one per AC)
  *     for different virtual interfaces. See the doc section on HW queue
  *     control for more details.
+ *
+ * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
+ *     P2P Interface. This will be honoured even if more than one interface
+ *     is supported.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1230,6 +1272,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_AP_LINK_PS                         = 1<<22,
        IEEE80211_HW_TX_AMPDU_SETUP_IN_HW               = 1<<23,
        IEEE80211_HW_SCAN_WHILE_IDLE                    = 1<<24,
+       IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF              = 1<<25,
 };
 
 /**
@@ -1884,10 +1927,14 @@ enum ieee80211_frame_release_type {
  * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
  *     to this station changed.
  * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
+ *     changed (in IBSS mode) due to discovering more information about
+ *     the peer.
  */
 enum ieee80211_rate_control_changed {
        IEEE80211_RC_BW_CHANGED         = BIT(0),
        IEEE80211_RC_SMPS_CHANGED       = BIT(1),
+       IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
 };
 
 /**
@@ -2264,7 +2311,9 @@ enum ieee80211_rate_control_changed {
  *     The callback is optional and can (should!) sleep.
  */
 struct ieee80211_ops {
-       void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       void (*tx)(struct ieee80211_hw *hw,
+                  struct ieee80211_tx_control *control,
+                  struct sk_buff *skb);
        int (*start)(struct ieee80211_hw *hw);
        void (*stop)(struct ieee80211_hw *hw);
 #ifdef CONFIG_PM
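A hedged sketch of a driver .tx handler under the new prototype: the station pointer moves out of info->control (freeing the 8 bytes noted in the hunk above) and arrives via the on-stack struct ieee80211_tx_control instead. my_tx is illustrative and the hardware hand-off is elided.

#include <net/mac80211.h>

static void my_tx(struct ieee80211_hw *hw,
		  struct ieee80211_tx_control *control,
		  struct sk_buff *skb)
{
	struct ieee80211_sta *sta = control ? control->sta : NULL;

	/* sta may be NULL; it is RCU-protected, so consult it here only
	 * and never keep the bare pointer around for later use.
	 */
	if (sta)
		skb_set_queue_mapping(skb, 0);	/* placeholder decision */

	/* ...hand the frame to the hardware queue (elided)... */
}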
index 96a3b5c..980d263 100644 (file)
@@ -49,6 +49,7 @@ enum {
 #include <linux/types.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+#include <linux/hash.h>
 
 #include <net/neighbour.h>
 
@@ -134,7 +135,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
 {
        const u32 *p32 = pkey;
 
-       return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+       return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
                (p32[1] * hash_rnd[1]) +
                (p32[2] * hash_rnd[2]) +
                (p32[3] * hash_rnd[3]));
index 344d898..0dab173 100644 (file)
@@ -334,18 +334,22 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 }
 #endif
 
-static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
        unsigned int seq;
        int hh_len;
 
        do {
-               int hh_alen;
-
                seq = read_seqbegin(&hh->hh_lock);
                hh_len = hh->hh_len;
-               hh_alen = HH_DATA_ALIGN(hh_len);
-               memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+               if (likely(hh_len <= HH_DATA_MOD)) {
+                       /* this is inlined by gcc */
+                       memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+               } else {
+                       int hh_alen = HH_DATA_ALIGN(hh_len);
+
+                       memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+               }
        } while (read_seqretry(&hh->hh_lock, seq));
 
        skb_push(skb, hh_len);
index ae1cd6c..5ae57f1 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/netns/packet.h>
 #include <net/netns/ipv4.h>
 #include <net/netns/ipv6.h>
+#include <net/netns/sctp.h>
 #include <net/netns/dccp.h>
 #include <net/netns/x_tables.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -66,6 +67,7 @@ struct net {
        struct hlist_head       *dev_name_head;
        struct hlist_head       *dev_index_head;
        unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
+       int                     ifindex;
 
        /* core fib_rules */
        struct list_head        rules_ops;
@@ -80,6 +82,9 @@ struct net {
 #if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
+#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+       struct netns_sctp       sctp;
+#endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
        struct netns_dccp       dccp;
 #endif
@@ -104,6 +109,13 @@ struct net {
        struct sock             *diag_nlsk;
 };
 
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+
+#define LOOPBACK_IFINDEX       1
 
 #include <linux/seq_file_net.h>
 
index 785f37a..09175d5 100644 (file)
  *   nla_put_u16(skb, type, value)     add u16 attribute to skb
  *   nla_put_u32(skb, type, value)     add u32 attribute to skb
  *   nla_put_u64(skb, type, value)     add u64 attribute to skb
+ *   nla_put_s8(skb, type, value)      add s8 attribute to skb
+ *   nla_put_s16(skb, type, value)     add s16 attribute to skb
+ *   nla_put_s32(skb, type, value)     add s32 attribute to skb
+ *   nla_put_s64(skb, type, value)     add s64 attribute to skb
  *   nla_put_string(skb, type, str)    add string attribute to skb
  *   nla_put_flag(skb, type)           add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
  *   nla_get_u16(nla)                  get payload for a u16 attribute
  *   nla_get_u32(nla)                  get payload for a u32 attribute
  *   nla_get_u64(nla)                  get payload for a u64 attribute
+ *   nla_get_s8(nla)                   get payload for a s8 attribute
+ *   nla_get_s16(nla)                  get payload for a s16 attribute
+ *   nla_get_s32(nla)                  get payload for a s32 attribute
+ *   nla_get_s64(nla)                  get payload for a s64 attribute
  *   nla_get_flag(nla)                 return 1 if flag is true
  *   nla_get_msecs(nla)                        get payload for a msecs attribute
  *
@@ -160,6 +168,10 @@ enum {
        NLA_NESTED_COMPAT,
        NLA_NUL_STRING,
        NLA_BINARY,
+       NLA_S8,
+       NLA_S16,
+       NLA_S32,
+       NLA_S64,
        __NLA_TYPE_MAX,
 };
 
@@ -183,6 +195,8 @@ enum {
  *    NLA_NESTED_COMPAT    Minimum length of structure payload
  *    NLA_U8, NLA_U16,
  *    NLA_U32, NLA_U64,
+ *    NLA_S8, NLA_S16,
+ *    NLA_S32, NLA_S64,
  *    NLA_MSECS            Leaving the length field zero will verify the
  *                         given type fits, using it verifies minimum length
  *                         just like "All other"
@@ -879,6 +893,50 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
 }
 
 /**
+ * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+{
+       return nla_put(skb, attrtype, sizeof(s8), &value);
+}
+
+/**
+ * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+{
+       return nla_put(skb, attrtype, sizeof(s16), &value);
+}
+
+/**
+ * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+{
+       return nla_put(skb, attrtype, sizeof(s32), &value);
+}
+
+/**
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+{
+       return nla_put(skb, attrtype, sizeof(s64), &value);
+}
+
+/**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -994,6 +1052,46 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
 }
 
 /**
+ * nla_get_s32 - return payload of s32 attribute
+ * @nla: s32 netlink attribute
+ */
+static inline s32 nla_get_s32(const struct nlattr *nla)
+{
+       return *(s32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s16 - return payload of s16 attribute
+ * @nla: s16 netlink attribute
+ */
+static inline s16 nla_get_s16(const struct nlattr *nla)
+{
+       return *(s16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s8 - return payload of s8 attribute
+ * @nla: s8 netlink attribute
+ */
+static inline s8 nla_get_s8(const struct nlattr *nla)
+{
+       return *(s8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s64 - return payload of s64 attribute
+ * @nla: s64 netlink attribute
+ */
+static inline s64 nla_get_s64(const struct nlattr *nla)
+{
+       s64 tmp;
+
+       nla_memcpy(&tmp, nla, sizeof(tmp));
+
+       return tmp;
+}
+
+/**
  * nla_get_flag - return payload of flag attribute
  * @nla: flag netlink attribute
  */
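A hedged sketch of the new signed helpers round-tripping an s32 through an attribute; MY_ATTR_* and the policy table are illustrative and not part of this patch.

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_TEMP,		/* signed value, e.g. millidegrees */
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_TEMP] = { .type = NLA_S32 },
};

/* sender side: add the attribute to an outgoing message */
static int my_fill_msg(struct sk_buff *skb, s32 temp)
{
	return nla_put_s32(skb, MY_ATTR_TEMP, temp);
}

/* receiver side: read it back after nla_parse() filled tb[] */
static s32 my_read_msg(struct nlattr *tb[])
{
	return tb[MY_ATTR_TEMP] ? nla_get_s32(tb[MY_ATTR_TEMP]) : 0;
}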
index 1474dd6..3516dc0 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef __NETNS_IPV4_H__
 #define __NETNS_IPV4_H__
 
+#include <linux/uidgid.h>
 #include <net/inet_frag.h>
 
 struct tcpm_hash_bucket;
@@ -62,7 +63,7 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       unsigned int sysctl_ping_group_range[2];
+       kgid_t sysctl_ping_group_range[2];
        long sysctl_tcp_mem[3];
 
        atomic_t rt_genid;
index cb4e894..17ec2b9 100644 (file)
@@ -5,10 +5,10 @@
 #define __NETNS_PACKET_H__
 
 #include <linux/rculist.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 struct netns_packet {
-       spinlock_t              sklist_lock;
+       struct mutex            sklist_lock;
        struct hlist_head       sklist;
 };
 
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644 (file)
index 0000000..5e5eb1f
--- /dev/null
@@ -0,0 +1,131 @@
+#ifndef __NETNS_SCTP_H__
+#define __NETNS_SCTP_H__
+
+struct sock;
+struct proc_dir_entry;
+struct sctp_mib;
+struct ctl_table_header;
+
+struct netns_sctp {
+       DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry *proc_net_sctp;
+#endif
+#ifdef CONFIG_SYSCTL
+       struct ctl_table_header *sysctl_header;
+#endif
+       /* This is the global socket data structure used for responding to
+        * the Out-of-the-blue (OOTB) packets.  A control sock will be created
+        * for this socket at the initialization time.
+        */
+       struct sock *ctl_sock;
+
+       /* This is the global local address list.
+        * We actively maintain this complete list of addresses on
+        * the system by catching address add/delete events.
+        *
+        * It is a list of sctp_sockaddr_entry.
+        */
+       struct list_head local_addr_list;
+       struct list_head addr_waitq;
+       struct timer_list addr_wq_timer;
+       struct list_head auto_asconf_splist;
+       spinlock_t addr_wq_lock;
+
+       /* Lock that protects the local_addr_list writers */
+       spinlock_t local_addr_lock;
+
+       /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+        *
+        * The following protocol parameters are RECOMMENDED:
+        *
+        * RTO.Initial              - 3  seconds
+        * RTO.Min                  - 1  second
+        * RTO.Max                 -  60 seconds
+        * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
+        * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
+        */
+       unsigned int rto_initial;
+       unsigned int rto_min;
+       unsigned int rto_max;
+
+       /* Note: rto_alpha and rto_beta are really defined as inverse
+        * powers of two to facilitate integer operations.
+        */
+       int rto_alpha;
+       int rto_beta;
+
+       /* Max.Burst                - 4 */
+       int max_burst;
+
+       /* Whether Cookie Preservative is enabled(1) or not(0) */
+       int cookie_preserve_enable;
+
+       /* Valid.Cookie.Life        - 60  seconds  */
+       unsigned int valid_cookie_life;
+
+       /* Delayed SACK timeout  200ms default*/
+       unsigned int sack_timeout;
+
+       /* HB.interval              - 30 seconds  */
+       unsigned int hb_interval;
+
+       /* Association.Max.Retrans  - 10 attempts
+        * Path.Max.Retrans         - 5  attempts (per destination address)
+        * Max.Init.Retransmits     - 8  attempts
+        */
+       int max_retrans_association;
+       int max_retrans_path;
+       int max_retrans_init;
+       /* Potentially-Failed.Max.Retrans sysctl value
+        * taken from:
+        * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+        */
+       int pf_retrans;
+
+       /*
+        * Policy for performing sctp/socket accounting
+        * 0   - do socket level accounting, all assocs share sk_sndbuf
+        * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
+        */
+       int sndbuf_policy;
+
+       /*
+        * Policy for performing sctp/socket accounting
+        * 0   - do socket level accounting, all assocs share sk_rcvbuf
+        * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
+        */
+       int rcvbuf_policy;
+
+       int default_auto_asconf;
+
+       /* Flag to indicate if addip is enabled. */
+       int addip_enable;
+       int addip_noauth;
+
+       /* Flag to indicate if PR-SCTP is enabled. */
+       int prsctp_enable;
+
+       /* Flag to indicate if SCTP-AUTH is enabled */
+       int auth_enable;
+
+       /*
+        * Policy to control SCTP IPv4 address scoping
+        * 0   - Disable IPv4 address scoping
+        * 1   - Enable IPv4 address scoping
+        * 2   - Selectively allow only IPv4 private addresses
+        * 3   - Selectively allow only IPv4 link local address
+        */
+       int scope_policy;
+
+       /* Threshold for rwnd update SACKs.  Receive buffer shifted this many
+        * bits is an indicator of when to send a window update SACK.
+        */
+       int rwnd_upd_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
+};
+
+#endif /* __NETNS_SCTP_H__ */
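With the tunables relocated into struct netns_sctp, readers and writers go through the owning namespace (net->sctp.<field>) rather than the sctp_globals aliases that the sctp.h and structs.h hunks further down remove. A hedged sketch of a per-net initializer; the defaults are the RFC-suggested values quoted in the comments above, while the units and the function name are assumptions.

#include <net/net_namespace.h>

static int __net_init my_sctp_defaults_init(struct net *net)
{
	/* RFC 2960 section 14 suggested values, per the comments above */
	net->sctp.rto_initial = 3 * 1000;	/* assumed milliseconds */
	net->sctp.rto_min     = 1 * 1000;
	net->sctp.rto_max     = 60 * 1000;
	net->sctp.rto_alpha   = 3;		/* 1/8 as a right shift */
	net->sctp.rto_beta    = 2;		/* 1/4 as a right shift */
	net->sctp.max_burst   = 4;
	net->sctp.cookie_preserve_enable = 1;
	net->sctp.valid_cookie_life = 60 * 1000;
	return 0;
}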
index d9611e0..4616f46 100644 (file)
@@ -188,7 +188,8 @@ struct tcf_proto_ops {
 
        unsigned long           (*get)(struct tcf_proto*, u32 handle);
        void                    (*put)(struct tcf_proto*, unsigned long);
-       int                     (*change)(struct tcf_proto*, unsigned long,
+       int                     (*change)(struct sk_buff *,
+                                       struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
                                        unsigned long *);
        int                     (*delete)(struct tcf_proto*, unsigned long);
index ff49964..9c6414f 100644 (file)
 /*
  * sctp/protocol.c
  */
-extern struct sock *sctp_get_ctl_sock(void);
-extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
+extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
                                     sctp_scope_t, gfp_t gfp,
                                     int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int);
+extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -140,12 +139,12 @@ extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 /*
  * sctp/primitive.c
  */
-int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
-int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
-int sctp_primitive_ABORT(struct sctp_association *, void *arg);
-int sctp_primitive_SEND(struct sctp_association *, void *arg);
-int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);
-int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
 
 /*
  * sctp/input.c
@@ -156,7 +155,7 @@ void sctp_hash_established(struct sctp_association *);
 void sctp_unhash_established(struct sctp_association *);
 void sctp_hash_endpoint(struct sctp_endpoint *);
 void sctp_unhash_endpoint(struct sctp_endpoint *);
-struct sock *sctp_err_lookup(int family, struct sk_buff *,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
                             struct sctphdr *, struct sctp_association **,
                             struct sctp_transport **);
 void sctp_err_finish(struct sock *, struct sctp_association *);
@@ -173,14 +172,14 @@ void sctp_backlog_migrate(struct sctp_association *assoc,
 /*
  * sctp/proc.c
  */
-int sctp_snmp_proc_init(void);
-void sctp_snmp_proc_exit(void);
-int sctp_eps_proc_init(void);
-void sctp_eps_proc_exit(void);
-int sctp_assocs_proc_init(void);
-void sctp_assocs_proc_exit(void);
-int sctp_remaddr_proc_init(void);
-void sctp_remaddr_proc_exit(void);
+int sctp_snmp_proc_init(struct net *net);
+void sctp_snmp_proc_exit(struct net *net);
+int sctp_eps_proc_init(struct net *net);
+void sctp_eps_proc_exit(struct net *net);
+int sctp_assocs_proc_init(struct net *net);
+void sctp_assocs_proc_exit(struct net *net);
+int sctp_remaddr_proc_init(struct net *net);
+void sctp_remaddr_proc_exit(struct net *net);
 
 
 /*
@@ -222,11 +221,10 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
 #define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
 
 /* SCTP SNMP MIB stats handlers */
-DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
-#define SCTP_INC_STATS(field)      SNMP_INC_STATS(sctp_statistics, field)
-#define SCTP_INC_STATS_BH(field)   SNMP_INC_STATS_BH(sctp_statistics, field)
-#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
-#define SCTP_DEC_STATS(field)      SNMP_DEC_STATS(sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 #endif /* !TEST_FRAME */
 
@@ -361,25 +359,29 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
 #define SCTP_DBG_OBJCNT_ENTRY(name) \
 {.label= #name, .counter= &sctp_dbg_objcnt_## name}
 
-void sctp_dbg_objcnt_init(void);
-void sctp_dbg_objcnt_exit(void);
+void sctp_dbg_objcnt_init(struct net *);
+void sctp_dbg_objcnt_exit(struct net *);
 
 #else
 
 #define SCTP_DBG_OBJCNT_INC(name)
 #define SCTP_DBG_OBJCNT_DEC(name)
 
-static inline void sctp_dbg_objcnt_init(void) { return; }
-static inline void sctp_dbg_objcnt_exit(void) { return; }
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
 
 #endif /* CONFIG_SCTP_DBG_OBJCOUNT */
 
 #if defined CONFIG_SYSCTL
 void sctp_sysctl_register(void);
 void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
 #else
 static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
 #endif
 
 /* Size of Supported Address Parameter for 'x' address types. */
@@ -586,7 +588,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
 
 extern struct proto sctp_prot;
 extern struct proto sctpv6_prot;
-extern struct proc_dir_entry *proc_net_sctp;
 void sctp_put_port(struct sock *sk);
 
 extern struct idr sctp_assocs_id;
@@ -632,21 +633,21 @@ static inline int sctp_sanity_check(void)
 
 /* Warning: The following hash functions assume a power of two 'size'. */
 /* This is the hash function for the SCTP port hash table. */
-static inline int sctp_phashfn(__u16 lport)
+static inline int sctp_phashfn(struct net *net, __u16 lport)
 {
-       return lport & (sctp_port_hashsize - 1);
+       return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
 }
 
 /* This is the hash function for the endpoint hash table. */
-static inline int sctp_ep_hashfn(__u16 lport)
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
 {
-       return lport & (sctp_ep_hashsize - 1);
+       return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
 }
 
 /* This is the hash function for the association hash table. */
-static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
+static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
 {
-       int h = (lport << 16) + rport;
+       int h = (lport << 16) + rport + net_hash_mix(net);
        h ^= h>>8;
        return h & (sctp_assoc_hashsize - 1);
 }
index 9148632..b5887e1 100644 (file)
@@ -77,7 +77,8 @@ typedef struct {
        int action;
 } sctp_sm_command_t;
 
-typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
+                                             const struct sctp_endpoint *,
                                              const struct sctp_association *,
                                              const sctp_subtype_t type,
                                              void *arg,
@@ -178,7 +179,8 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
 
 /* Prototypes for utility support functions.  */
 __u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
+                                           sctp_event_t,
                                            sctp_state_t,
                                            sctp_subtype_t);
 int sctp_chunk_iif(const struct sctp_chunk *);
@@ -268,7 +270,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *);
 
 /* Prototypes for statetable processing. */
 
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
               sctp_state_t state,
                struct sctp_endpoint *,
                struct sctp_association *asoc,
index fc5e600..0fef00f 100644 (file)
@@ -102,6 +102,7 @@ struct sctp_bind_bucket {
        unsigned short  fastreuse;
        struct hlist_node       node;
        struct hlist_head       owner;
+       struct net      *net;
 };
 
 struct sctp_bind_hashbucket {
@@ -118,69 +119,6 @@ struct sctp_hashbucket {
 
 /* The SCTP globals structure. */
 extern struct sctp_globals {
-       /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
-        *
-        * The following protocol parameters are RECOMMENDED:
-        *
-        * RTO.Initial              - 3  seconds
-        * RTO.Min                  - 1  second
-        * RTO.Max                 -  60 seconds
-        * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
-        * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
-        */
-       unsigned int rto_initial;
-       unsigned int rto_min;
-       unsigned int rto_max;
-
-       /* Note: rto_alpha and rto_beta are really defined as inverse
-        * powers of two to facilitate integer operations.
-        */
-       int rto_alpha;
-       int rto_beta;
-
-       /* Max.Burst                - 4 */
-       int max_burst;
-
-       /* Whether Cookie Preservative is enabled(1) or not(0) */
-       int cookie_preserve_enable;
-
-       /* Valid.Cookie.Life        - 60  seconds  */
-       unsigned int valid_cookie_life;
-
-       /* Delayed SACK timeout  200ms default*/
-       unsigned int sack_timeout;
-
-       /* HB.interval              - 30 seconds  */
-       unsigned int hb_interval;
-
-       /* Association.Max.Retrans  - 10 attempts
-        * Path.Max.Retrans         - 5  attempts (per destination address)
-        * Max.Init.Retransmits     - 8  attempts
-        */
-       int max_retrans_association;
-       int max_retrans_path;
-       int max_retrans_init;
-
-       /* Potentially-Failed.Max.Retrans sysctl value
-        * taken from:
-        * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
-        */
-       int pf_retrans;
-
-       /*
-        * Policy for preforming sctp/socket accounting
-        * 0   - do socket level accounting, all assocs share sk_sndbuf
-        * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
-        */
-       int sndbuf_policy;
-
-       /*
-        * Policy for preforming sctp/socket accounting
-        * 0   - do socket level accounting, all assocs share sk_rcvbuf
-        * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
-        */
-       int rcvbuf_policy;
-
        /* The following variables are implementation specific.  */
 
        /* Default initialization values to be applied to new associations. */
@@ -204,70 +142,11 @@ extern struct sctp_globals {
        int port_hashsize;
        struct sctp_bind_hashbucket *port_hashtable;
 
-       /* This is the global local address list.
-        * We actively maintain this complete list of addresses on
-        * the system by catching address add/delete events.
-        *
-        * It is a list of sctp_sockaddr_entry.
-        */
-       struct list_head local_addr_list;
-       int default_auto_asconf;
-       struct list_head addr_waitq;
-       struct timer_list addr_wq_timer;
-       struct list_head auto_asconf_splist;
-       spinlock_t addr_wq_lock;
-
-       /* Lock that protects the local_addr_list writers */
-       spinlock_t addr_list_lock;
-       
-       /* Flag to indicate if addip is enabled. */
-       int addip_enable;
-       int addip_noauth_enable;
-
-       /* Flag to indicate if PR-SCTP is enabled. */
-       int prsctp_enable;
-
-       /* Flag to idicate if SCTP-AUTH is enabled */
-       int auth_enable;
-
-       /*
-        * Policy to control SCTP IPv4 address scoping
-        * 0   - Disable IPv4 address scoping
-        * 1   - Enable IPv4 address scoping
-        * 2   - Selectively allow only IPv4 private addresses
-        * 3   - Selectively allow only IPv4 link local address
-        */
-       int ipv4_scope_policy;
-
        /* Flag to indicate whether computing and verifying checksum
         * is disabled. */
         bool checksum_disable;
-
-       /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
-        * bits is an indicator of when to send and window update SACK.
-        */
-       int rwnd_update_shift;
-
-       /* Threshold for autoclose timeout, in seconds. */
-       unsigned long max_autoclose;
 } sctp_globals;
 
-#define sctp_rto_initial               (sctp_globals.rto_initial)
-#define sctp_rto_min                   (sctp_globals.rto_min)
-#define sctp_rto_max                   (sctp_globals.rto_max)
-#define sctp_rto_alpha                 (sctp_globals.rto_alpha)
-#define sctp_rto_beta                  (sctp_globals.rto_beta)
-#define sctp_max_burst                 (sctp_globals.max_burst)
-#define sctp_valid_cookie_life         (sctp_globals.valid_cookie_life)
-#define sctp_cookie_preserve_enable    (sctp_globals.cookie_preserve_enable)
-#define sctp_max_retrans_association   (sctp_globals.max_retrans_association)
-#define sctp_sndbuf_policy             (sctp_globals.sndbuf_policy)
-#define sctp_rcvbuf_policy             (sctp_globals.rcvbuf_policy)
-#define sctp_max_retrans_path          (sctp_globals.max_retrans_path)
-#define sctp_pf_retrans                        (sctp_globals.pf_retrans)
-#define sctp_max_retrans_init          (sctp_globals.max_retrans_init)
-#define sctp_sack_timeout              (sctp_globals.sack_timeout)
-#define sctp_hb_interval               (sctp_globals.hb_interval)
 #define sctp_max_instreams             (sctp_globals.max_instreams)
 #define sctp_max_outstreams            (sctp_globals.max_outstreams)
 #define sctp_address_families          (sctp_globals.address_families)
@@ -277,21 +156,7 @@ extern struct sctp_globals {
 #define sctp_assoc_hashtable           (sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize             (sctp_globals.port_hashsize)
 #define sctp_port_hashtable            (sctp_globals.port_hashtable)
-#define sctp_local_addr_list           (sctp_globals.local_addr_list)
-#define sctp_local_addr_lock           (sctp_globals.addr_list_lock)
-#define sctp_auto_asconf_splist                (sctp_globals.auto_asconf_splist)
-#define sctp_addr_waitq                        (sctp_globals.addr_waitq)
-#define sctp_addr_wq_timer             (sctp_globals.addr_wq_timer)
-#define sctp_addr_wq_lock              (sctp_globals.addr_wq_lock)
-#define sctp_default_auto_asconf       (sctp_globals.default_auto_asconf)
-#define sctp_scope_policy              (sctp_globals.ipv4_scope_policy)
-#define sctp_addip_enable              (sctp_globals.addip_enable)
-#define sctp_addip_noauth              (sctp_globals.addip_noauth_enable)
-#define sctp_prsctp_enable             (sctp_globals.prsctp_enable)
-#define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
-#define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
-#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -1085,7 +950,7 @@ struct sctp_transport {
        __u64 hb_nonce;
 };
 
-struct sctp_transport *sctp_transport_new(const union sctp_addr *,
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
                                          gfp_t);
 void sctp_transport_set_owner(struct sctp_transport *,
                              struct sctp_association *);
@@ -1240,7 +1105,7 @@ struct sctp_bind_addr {
 
 void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
 void sctp_bind_addr_free(struct sctp_bind_addr *);
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                        const struct sctp_bind_addr *src,
                        sctp_scope_t scope, gfp_t gfp,
                        int flags);
@@ -1267,7 +1132,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
                           __u16 port, gfp_t gfp);
 
 sctp_scope_t sctp_scope(const union sctp_addr *);
-int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
 int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
 int sctp_addr_is_valid(const union sctp_addr *addr);
 int sctp_is_ep_boundall(struct sock *sk);
@@ -1425,13 +1290,13 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
                                const union sctp_addr *);
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
-                                       const union sctp_addr *);
-int sctp_has_association(const union sctp_addr *laddr,
+                                       struct net *, const union sctp_addr *);
+int sctp_has_association(struct net *net, const union sctp_addr *laddr,
                         const union sctp_addr *paddr);
 
-int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
-                    sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
-                    struct sctp_chunk **err_chunk);
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
+                    sctp_cid_t, sctp_init_chunk_t *peer_init,
+                    struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
 int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
                      const union sctp_addr *peer,
                      sctp_init_chunk_t *init, gfp_t gfp);
@@ -2013,6 +1878,7 @@ void sctp_assoc_control_transport(struct sctp_association *,
                                  sctp_transport_cmd_t, sctp_sn_error_t);
 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
+                                          struct net *,
                                           const union sctp_addr *,
                                           const union sctp_addr *);
 void sctp_assoc_migrate(struct sctp_association *, struct sock *);
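
The SCTP prototypes above now take an explicit struct net instead of relying on global SCTP state. A minimal caller-side sketch, in kernel context, of how a lookup derives the namespace from the socket; the wrapper name is hypothetical, and only the sctp_has_association() signature comes from the change above:

/* hypothetical wrapper; only sctp_has_association() reflects the new API */
static int example_peer_known(struct sock *sk,
			      const union sctp_addr *laddr,
			      const union sctp_addr *paddr)
{
	/* the namespace is now passed explicitly rather than implied
	 * by global state
	 */
	return sctp_has_association(sock_net(sk), laddr, paddr);
}
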
index 0147b90..7159626 100644 (file)
@@ -154,13 +154,15 @@ struct linux_xfrm_mib {
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
        do { \
-               this_cpu_inc(mib[0]->mibs[basefield##PKTS]);            \
-               this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);  \
+               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               this_cpu_inc(ptr[basefield##PKTS]);             \
+               this_cpu_add(ptr[basefield##OCTETS], addend);   \
        } while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
        do { \
-               __this_cpu_inc(mib[0]->mibs[basefield##PKTS]);          \
-               __this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);        \
+               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __this_cpu_inc(ptr[basefield##PKTS]);           \
+               __this_cpu_add(ptr[basefield##OCTETS], addend); \
        } while (0)
 
 
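
The rewritten macros capture mib[0]->mibs in a __typeof__-derived local before touching individual counters, so the (potentially complex) argument expression is expanded only once per invocation. A standalone userspace illustration of the same pattern, compilable with GCC; all names here are invented:

#include <stdio.h>

/* evaluate the array argument once, then update several slots */
#define UPD_STATS(arr, idx, addend)                             \
	do {                                                    \
		__typeof__(*(arr)) *ptr = (arr);                \
		ptr[(idx)]     += 1;                            \
		ptr[(idx) + 1] += (addend);                     \
	} while (0)

static long *pick_counters(long *counters, int *calls)
{
	++*calls;		/* counts how often the argument is evaluated */
	return counters;
}

int main(void)
{
	long counters[2] = { 0, 0 };
	int calls = 0;

	UPD_STATS(pick_counters(counters, &calls), 0, 40);
	printf("pkts=%ld octets=%ld, argument evaluated %d time(s)\n",
	       counters[0], counters[1], calls);
	return 0;
}
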
index 72132ae..84bdaec 100644 (file)
@@ -606,6 +606,15 @@ static inline void sk_add_bind_node(struct sock *sk,
 #define sk_for_each_bound(__sk, node, list) \
        hlist_for_each_entry(__sk, node, list, sk_bind_node)
 
+static inline struct user_namespace *sk_user_ns(struct sock *sk)
+{
+       /* Careful: only use this in a context where these parameters
+        * can not change and must all be valid, such as recvmsg from
+        * userspace.
+        */
+       return sk->sk_socket->file->f_cred->user_ns;
+}
+
 /* Sock flags */
 enum sock_flags {
        SOCK_DEAD,
@@ -1670,7 +1679,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
-extern int sock_i_uid(struct sock *sk);
+extern kuid_t sock_i_uid(struct sock *sk);
 extern unsigned long sock_i_ino(struct sock *sk);
 
 static inline struct dst_entry *
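
With sock_i_uid() now returning a kuid_t, callers translate to a numeric uid only at the reporting boundary, in the namespace of whoever is reading. A kernel-style sketch of that pattern; the function name is hypothetical and this is not a complete proc handler:

static void example_print_sock_owner(struct seq_file *seq, struct sock *sk)
{
	kuid_t kuid = sock_i_uid(sk);

	/* convert for display in the reader's user namespace */
	seq_printf(seq, "uid=%u\n",
		   from_kuid_munged(seq_user_ns(seq), kuid));
}
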
index 1f000ff..9a0021d 100644 (file)
@@ -1510,7 +1510,8 @@ struct tcp_iter_state {
        sa_family_t             family;
        enum tcp_seq_states     state;
        struct sock             *syn_wait_sk;
-       int                     bucket, offset, sbucket, num, uid;
+       int                     bucket, offset, sbucket, num;
+       kuid_t                  uid;
        loff_t                  last_pos;
 };
 
index 976a81a..36ad56b 100644 (file)
@@ -573,7 +573,7 @@ struct xfrm_mgr {
        struct list_head        list;
        char                    *id;
        int                     (*notify)(struct xfrm_state *x, const struct km_event *c);
-       int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
+       int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
        struct xfrm_policy      *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
        int                     (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
        int                     (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
index af6c7f8..b445d6f 100644 (file)
@@ -942,28 +942,12 @@ config UIDGID_CONVERTED
        depends on PROC_EVENTS = n
 
        # Networking
-       depends on NET = n
        depends on NET_9P = n
-       depends on IPX = n
-       depends on PHONET = n
-       depends on NET_CLS_FLOW = n
-       depends on NETFILTER_XT_MATCH_OWNER = n
-       depends on NETFILTER_XT_MATCH_RECENT = n
-       depends on NETFILTER_XT_TARGET_LOG = n
-       depends on NETFILTER_NETLINK_LOG = n
-       depends on INET = n
-       depends on IPV6 = n
-       depends on IP_SCTP = n
        depends on AF_RXRPC = n
-       depends on LLC2 = n
        depends on NET_KEY = n
-       depends on INET_DIAG = n
        depends on DNS_RESOLVER = n
-       depends on AX25 = n
-       depends on ATALK = n
 
        # Filesystems
-       depends on USB_DEVICEFS = n
        depends on USB_GADGETFS = n
        depends on USB_FUNCTIONFS = n
        depends on DEVTMPFS = n
@@ -1019,9 +1003,6 @@ config UIDGID_CONVERTED
        depends on !UML || HOSTFS = n
 
        # The rare drivers that won't build
-       depends on AIRO = n
-       depends on AIRO_CS = n
-       depends on TUN = n
        depends on INFINIBAND_QIB = n
        depends on BLK_DEV_LOOP = n
        depends on ANDROID_BINDER_IPC = n
index e86b291..aebd4f5 100644 (file)
@@ -479,6 +479,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
        }
        return nr;
 }
+EXPORT_SYMBOL_GPL(pid_nr_ns);
 
 pid_t pid_vnr(struct pid *pid)
 {
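
Exporting pid_nr_ns() lets modules translate a struct pid into the numbering of a particular pid namespace. A kernel-style sketch of a hypothetical caller:

static void example_report_pid(struct task_struct *task,
			       struct pid_namespace *ns)
{
	pid_t nr = pid_nr_ns(task_pid(task), ns);

	pr_info("task %s is pid %d in that namespace\n", task->comm, nr);
}
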
index b3c7fd5..baa528d 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/reboot.h>
+#include <linux/export.h>
 
 #define BITS_PER_PAGE          (PAGE_SIZE*8)
 
@@ -144,6 +145,7 @@ void free_pid_ns(struct kref *kref)
        if (parent != NULL)
                put_pid_ns(parent);
 }
+EXPORT_SYMBOL_GPL(free_pid_ns);
 
 void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 {
index 4226dfe..18eca78 100644 (file)
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_U64]       = sizeof(u64),
        [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
+       [NLA_S8]        = sizeof(s8),
+       [NLA_S16]       = sizeof(s16),
+       [NLA_S32]       = sizeof(s32),
+       [NLA_S64]       = sizeof(s64),
 };
 
 static int validate_nla(const struct nlattr *nla, int maxtype,
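
The new NLA_S* entries give signed attributes a minimum-length check in validate_nla(). A hedged sketch of a consumer with an invented attribute set; the nla_get_s32() accessor is assumed to come from the companion netlink.h changes in the same series:

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_TEMPERATURE,	/* signed, may legitimately be negative */
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_TEMPERATURE] = { .type = NLA_S32 },
};

static int example_parse(struct nlattr **tb, s32 *val)
{
	if (!tb[EXAMPLE_ATTR_TEMPERATURE])
		return -EINVAL;
	*val = nla_get_s32(tb[EXAMPLE_ATTR_TEMPERATURE]);
	return 0;
}
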
index 8ca533c..b258da8 100644 (file)
@@ -368,3 +368,9 @@ void vlan_vids_del_by_dev(struct net_device *dev,
                vlan_vid_del(dev, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+bool vlan_uses_dev(const struct net_device *dev)
+{
+       return rtnl_dereference(dev->vlan_info) ? true : false;
+}
+EXPORT_SYMBOL(vlan_uses_dev);
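
vlan_uses_dev() gives lower drivers a cheap check for stacked VLAN devices; because it uses rtnl_dereference() internally, callers are expected to hold RTNL. A sketch of a hypothetical caller (driver function name invented):

static int example_set_feature(struct net_device *dev)
{
	ASSERT_RTNL();

	/* refuse the reconfiguration while VLANs sit on top of us */
	if (vlan_uses_dev(dev))
		return -EBUSY;

	/* ... apply the new setting ... */
	return 0;
}
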
index b5b1a22..c30f3a0 100644 (file)
@@ -183,7 +183,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
                   ntohs(at->dest_net), at->dest_node, at->dest_port,
                   sk_wmem_alloc_get(s),
                   sk_rmem_alloc_get(s),
-                  s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
+                  s->sk_state,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
 out:
        return 0;
 }
index e3c579b..957999e 100644 (file)
@@ -51,14 +51,14 @@ int ax25_uid_policy;
 
 EXPORT_SYMBOL(ax25_uid_policy);
 
-ax25_uid_assoc *ax25_findbyuid(uid_t uid)
+ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
 {
        ax25_uid_assoc *ax25_uid, *res = NULL;
        struct hlist_node *node;
 
        read_lock(&ax25_uid_lock);
        ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
-               if (ax25_uid->uid == uid) {
+               if (uid_eq(ax25_uid->uid, uid)) {
                        ax25_uid_hold(ax25_uid);
                        res = ax25_uid;
                        break;
@@ -84,7 +84,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                read_lock(&ax25_uid_lock);
                ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
                        if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
-                               res = ax25_uid->uid;
+                               res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
                                break;
                        }
                }
@@ -93,9 +93,14 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                return res;
 
        case SIOCAX25ADDUID:
+       {
+               kuid_t sax25_kuid;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-               user = ax25_findbyuid(sax->sax25_uid);
+               sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
+               if (!uid_valid(sax25_kuid))
+                       return -EINVAL;
+               user = ax25_findbyuid(sax25_kuid);
                if (user) {
                        ax25_uid_put(user);
                        return -EEXIST;
@@ -106,7 +111,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                        return -ENOMEM;
 
                atomic_set(&ax25_uid->refcount, 1);
-               ax25_uid->uid  = sax->sax25_uid;
+               ax25_uid->uid  = sax25_kuid;
                ax25_uid->call = sax->sax25_call;
 
                write_lock(&ax25_uid_lock);
@@ -114,7 +119,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                write_unlock(&ax25_uid_lock);
 
                return 0;
-
+       }
        case SIOCAX25DELUID:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
@@ -172,7 +177,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
                struct ax25_uid_assoc *pt;
 
                pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
-               seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
+               seq_printf(seq, "%6d %s\n",
+                       from_kuid_munged(seq_user_ns(seq), pt->uid),
+                       ax2asc(buf, &pt->call));
        }
        return 0;
 }
index e877af8..df79300 100644 (file)
@@ -166,13 +166,15 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
        int16_t buff_pos;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct sk_buff *skb;
+       uint8_t *packet_pos;
 
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                return;
 
        packet_num = 0;
        buff_pos = 0;
-       batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+       packet_pos = forw_packet->skb->data;
+       batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
-               if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-                   (forw_packet->if_incoming == hard_iface))
+               if (forw_packet->direct_link_flags & BIT(packet_num) &&
+                   forw_packet->if_incoming == hard_iface)
                        batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
                else
                        batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
-               fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
-                                                           "Sending own" :
-                                                           "Forwarding"));
+               if (packet_num > 0 || !forw_packet->own)
+                       fwd_str = "Forwarding";
+               else
+                       fwd_str = "Sending own";
+
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                buff_pos += BATADV_OGM_HLEN;
                buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
                packet_num++;
-               batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                       (forw_packet->skb->data + buff_pos);
+               packet_pos = forw_packet->skb->data + buff_pos;
+               batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        }
 
        /* create clone because function is called more than once */
@@ -227,9 +231,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_ogm_packet *batadv_ogm_packet;
        unsigned char directlink;
+       uint8_t *packet_pos;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                               (forw_packet->skb->data);
+       packet_pos = forw_packet->skb->data;
+       batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
 
        if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
                                    int packet_len, bool direct_link)
 {
        unsigned char *skb_buff;
+       unsigned long new_direct_link_flag;
 
        skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
        memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
        forw_packet_aggr->num_packets++;
 
        /* save packet direct link flag status */
-       if (direct_link)
-               forw_packet_aggr->direct_link_flags |=
-                       (1 << forw_packet_aggr->num_packets);
+       if (direct_link) {
+               new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
+               forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
+       }
 }
 
 static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *primary_if;
        int vis_server, tt_num_changes = 0;
+       uint32_t seqno;
+       uint8_t bandwidth;
 
        vis_server = atomic_read(&bat_priv->vis_mode);
        primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
 
        /* change sequence number to network order */
-       batadv_ogm_packet->seqno =
-                       htonl((uint32_t)atomic_read(&hard_iface->seqno));
+       seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+       batadv_ogm_packet->seqno = htonl(seqno);
        atomic_inc(&hard_iface->seqno);
 
-       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
-       batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
+       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
+       batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
        if (tt_num_changes >= 0)
                batadv_ogm_packet->tt_num_changes = tt_num_changes;
 
@@ -613,12 +622,13 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        else
                batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
 
-       if ((hard_iface == primary_if) &&
-           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
-               batadv_ogm_packet->gw_flags =
-                               (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-       else
+       if (hard_iface == primary_if &&
+           atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
+               bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+               batadv_ogm_packet->gw_flags = bandwidth;
+       } else {
                batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
+       }
 
        batadv_slide_own_bcast_window(hard_iface);
        batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -642,8 +652,9 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        struct batadv_neigh_node *router = NULL;
        struct batadv_orig_node *orig_node_tmp;
        struct hlist_node *node;
-       uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+       uint8_t sum_orig, sum_neigh;
        uint8_t *neigh_addr;
+       uint8_t tq_avg;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "update_originator(): Searching and updating originator entry of received packet\n");
@@ -667,8 +678,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                spin_lock_bh(&tmp_neigh_node->lq_update_lock);
                batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
                                       &tmp_neigh_node->tq_index, 0);
-               tmp_neigh_node->tq_avg =
-                       batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+               tmp_neigh_node->tq_avg = tq_avg;
                spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
        }
 
@@ -727,17 +738,15 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        if (router && (neigh_node->tq_avg == router->tq_avg)) {
                orig_node_tmp = router->orig_node;
                spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
-               bcast_own_sum_orig =
-                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               sum_orig = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
                spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
                orig_node_tmp = neigh_node->orig_node;
                spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
-               bcast_own_sum_neigh =
-                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               sum_neigh = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
                spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
-               if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+               if (sum_orig >= sum_neigh)
                        goto update_tt;
        }
 
@@ -835,8 +844,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
-       total_count = (orig_eq_count > neigh_rq_count ?
-                      neigh_rq_count : orig_eq_count);
+       if (orig_eq_count > neigh_rq_count)
+               total_count = neigh_rq_count;
+       else
+               total_count = orig_eq_count;
 
        /* if we have too few packets (too less data) we set tq_own to zero
         * if we receive too few packets it is not considered bidirectional
@@ -910,6 +921,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        int set_mark, ret = -1;
        uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
        uint8_t *neigh_addr;
+       uint8_t packet_count;
 
        orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
@@ -944,9 +956,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                                                     tmp_neigh_node->real_bits,
                                                     seq_diff, set_mark);
 
-               tmp_neigh_node->real_packet_count =
-                       bitmap_weight(tmp_neigh_node->real_bits,
-                                     BATADV_TQ_LOCAL_WINDOW_SIZE);
+               packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+                                            BATADV_TQ_LOCAL_WINDOW_SIZE);
+               tmp_neigh_node->real_packet_count = packet_count;
        }
        rcu_read_unlock();
 
@@ -1163,9 +1175,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        /* if sender is a direct neighbor the sender mac equals
         * originator mac
         */
-       orig_neigh_node = (is_single_hop_neigh ?
-                          orig_node :
-                          batadv_get_orig_node(bat_priv, ethhdr->h_source));
+       if (is_single_hop_neigh)
+               orig_neigh_node = orig_node;
+       else
+               orig_neigh_node = batadv_get_orig_node(bat_priv,
+                                                      ethhdr->h_source);
+
        if (!orig_neigh_node)
                goto out;
 
@@ -1251,6 +1266,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        int buff_pos = 0, packet_len;
        unsigned char *tt_buff, *packet_buff;
        bool ret;
+       uint8_t *packet_pos;
 
        ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
        if (!ret)
@@ -1281,8 +1297,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
                buff_pos += BATADV_OGM_HLEN;
                buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
 
-               batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                               (packet_buff + buff_pos);
+               packet_pos = packet_buff + buff_pos;
+               batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
                                           batadv_ogm_packet->tt_num_changes));
 
index 6705d35..0a9084a 100644 (file)
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
 static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
                                                   struct batadv_claim *data)
 {
-       struct batadv_hashtable *hash = bat_priv->claim_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_claim *claim;
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
                          uint8_t *addr, short vid)
 {
-       struct batadv_hashtable *hash = bat_priv->backbone_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
        int i;
        spinlock_t *list_lock;  /* protects write access to the hash lists */
 
-       hash = backbone_gw->bat_priv->claim_hash;
+       hash = backbone_gw->bat_priv->bla.claim_hash;
        if (!hash)
                return;
 
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
        if (!primary_if)
                return;
 
-       memcpy(&local_claim_dest, &bat_priv->claim_dest,
+       memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
               sizeof(local_claim_dest));
        local_claim_dest.type = claimtype;
 
@@ -281,7 +281,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                         NULL,
                         /* Ethernet SRC/HW SRC:  originator mac */
                         primary_if->net_dev->dev_addr,
-                        /* HW DST: FF:43:05:XX:00:00
+                        /* HW DST: FF:43:05:XX:YY:YY
                          * with XX   = claim type
                          * and YY:YY = group id
                          */
@@ -295,7 +295,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 
        /* now we pretend that the client would have sent this ... */
        switch (claimtype) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                /* normal claim frame
                 * set Ethernet SRC to the clients mac
                 */
@@ -303,7 +303,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
                break;
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                /* unclaim frame
                 * set HW SRC to the clients mac
                 */
@@ -323,7 +323,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                /* request frame
-                * set HW SRC to the special mac containg the crc
+                * set HW SRC and header destination to the receiving backbone
+                * gw's mac
                 */
                memcpy(hw_src, mac, ETH_ALEN);
                memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
-       bat_priv->stats.rx_packets++;
-       bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+       batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                          skb->len + ETH_HLEN);
        soft_iface->last_rx = jiffies;
 
        netif_rx(skb);
@@ -389,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
        /* one for the hash, one for returning */
        atomic_set(&entry->refcount, 2);
 
-       hash_added = batadv_hash_add(bat_priv->backbone_hash,
+       hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
                                     batadv_compare_backbone_gw,
                                     batadv_choose_backbone_gw, entry,
                                     &entry->hash_entry);
@@ -456,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
        if (!backbone_gw)
                return;
 
-       hash = bat_priv->claim_hash;
+       hash = bat_priv->bla.claim_hash;
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
@@ -467,7 +469,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                                continue;
 
                        batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
-                                             BATADV_CLAIM_TYPE_ADD);
+                                             BATADV_CLAIM_TYPE_CLAIM);
                }
                rcu_read_unlock();
        }
@@ -497,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
 
        /* no local broadcasts should be sent or received, for now. */
        if (!atomic_read(&backbone_gw->request_sent)) {
-               atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+               atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
                atomic_set(&backbone_gw->request_sent, 1);
        }
 }
@@ -557,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
                           mac, vid);
-               hash_added = batadv_hash_add(bat_priv->claim_hash,
+               hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
                                             batadv_compare_claim,
                                             batadv_choose_claim, claim,
                                             &claim->hash_entry);
@@ -577,8 +579,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
                           mac, vid);
 
-               claim->backbone_gw->crc ^=
-                       crc16(0, claim->addr, ETH_ALEN);
+               claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
                batadv_backbone_gw_free_ref(claim->backbone_gw);
 
        }
@@ -610,7 +611,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
                   mac, vid);
 
-       batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
                           batadv_choose_claim, claim);
        batadv_claim_free_ref(claim); /* reference from the hash is gone */
 
@@ -657,7 +658,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
                 * we can allow traffic again.
                 */
                if (atomic_read(&backbone_gw->request_sent)) {
-                       atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+                       atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
                        atomic_set(&backbone_gw->request_sent, 0);
                }
        }
@@ -702,7 +703,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        if (primary_if && batadv_compare_eth(backbone_addr,
                                             primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
-                                     BATADV_CLAIM_TYPE_DEL);
+                                     BATADV_CLAIM_TYPE_UNCLAIM);
 
        backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 
@@ -738,7 +739,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
        batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
        if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
-                                     BATADV_CLAIM_TYPE_ADD);
+                                     BATADV_CLAIM_TYPE_CLAIM);
 
        /* TODO: we could call something like tt_local_del() here. */
 
@@ -772,7 +773,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
-       bla_dst_own = &bat_priv->claim_dest;
+       bla_dst_own = &bat_priv->bla.claim_dest;
 
        /* check if it is a claim packet in general */
        if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
         * otherwise assume it is in the hw_src
         */
        switch (bla_dst->type) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                backbone_addr = hw_src;
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
        case BATADV_CLAIM_TYPE_ANNOUNCE:
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                backbone_addr = ethhdr->h_source;
                break;
        default:
@@ -904,12 +905,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 
        /* check for the different types of claim frames ... */
        switch (bla_dst->type) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                if (batadv_handle_claim(bat_priv, primary_if, hw_src,
                                        ethhdr->h_source, vid))
                        return 1;
                break;
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                if (batadv_handle_unclaim(bat_priv, primary_if,
                                          ethhdr->h_source, hw_src, vid))
                        return 1;
@@ -945,7 +946,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
        spinlock_t *list_lock;  /* protects write access to the hash lists */
        int i;
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;
 
@@ -969,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 purge_now:
                        /* don't wait for the pending request anymore */
                        if (atomic_read(&backbone_gw->request_sent))
-                               atomic_dec(&bat_priv->bla_num_requests);
+                               atomic_dec(&bat_priv->bla.num_requests);
 
                        batadv_bla_del_backbone_claims(backbone_gw);
 
@@ -999,7 +1000,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
        struct batadv_hashtable *hash;
        int i;
 
-       hash = bat_priv->claim_hash;
+       hash = bat_priv->bla.claim_hash;
        if (!hash)
                return;
 
@@ -1046,11 +1047,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
        struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
+       __be16 group;
        int i;
 
        /* reset bridge loop avoidance group id */
-       bat_priv->claim_dest.group =
-               htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+       group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+       bat_priv->bla.claim_dest.group = group;
 
        if (!oldif) {
                batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                return;
        }
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;
 
@@ -1088,8 +1090,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 /* (re)start the timer */
 static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+       INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
                           msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 }
 
@@ -1099,9 +1101,9 @@ static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
  */
 static void batadv_bla_periodic_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
+       struct batadv_priv_bla *priv_bla;
        struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        struct batadv_hard_iface *primary_if;
        int i;
 
-       bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+       delayed_work = container_of(work, struct delayed_work, work);
+       priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
+       bat_priv = container_of(priv_bla, struct batadv_priv, bla);
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
@@ -1120,7 +1124,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto out;
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                goto out;
 
@@ -1160,40 +1164,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
        int i;
        uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
        struct batadv_hard_iface *primary_if;
+       uint16_t crc;
+       unsigned long entrytime;
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
 
        /* setting claim destination address */
-       memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
-       bat_priv->claim_dest.type = 0;
+       memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
+       bat_priv->bla.claim_dest.type = 0;
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (primary_if) {
-               bat_priv->claim_dest.group =
-                       htons(crc16(0, primary_if->net_dev->dev_addr,
-                                   ETH_ALEN));
+               crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
+               bat_priv->bla.claim_dest.group = htons(crc);
                batadv_hardif_free_ref(primary_if);
        } else {
-               bat_priv->claim_dest.group = 0; /* will be set later */
+               bat_priv->bla.claim_dest.group = 0; /* will be set later */
        }
 
        /* initialize the duplicate list */
+       entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
-               bat_priv->bcast_duplist[i].entrytime =
-                       jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
-       bat_priv->bcast_duplist_curr = 0;
+               bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
+       bat_priv->bla.bcast_duplist_curr = 0;
 
-       if (bat_priv->claim_hash)
+       if (bat_priv->bla.claim_hash)
                return 0;
 
-       bat_priv->claim_hash = batadv_hash_new(128);
-       bat_priv->backbone_hash = batadv_hash_new(32);
+       bat_priv->bla.claim_hash = batadv_hash_new(128);
+       bat_priv->bla.backbone_hash = batadv_hash_new(32);
 
-       if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+       if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
                return -ENOMEM;
 
-       batadv_hash_set_lock_class(bat_priv->claim_hash,
+       batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
                                   &batadv_claim_hash_lock_class_key);
-       batadv_hash_set_lock_class(bat_priv->backbone_hash,
+       batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
                                   &batadv_backbone_hash_lock_class_key);
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1234,8 +1239,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
        crc = crc16(0, content, length);
 
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
-               curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
-               entry = &bat_priv->bcast_duplist[curr];
+               curr = (bat_priv->bla.bcast_duplist_curr + i);
+               curr %= BATADV_DUPLIST_SIZE;
+               entry = &bat_priv->bla.bcast_duplist[curr];
 
                /* we can stop searching if the entry is too old ;
                 * later entries will be even older
@@ -1256,13 +1262,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                return 1;
        }
        /* not found, add a new entry (overwrite the oldest entry) */
-       curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+       curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
        curr %= BATADV_DUPLIST_SIZE;
-       entry = &bat_priv->bcast_duplist[curr];
+       entry = &bat_priv->bla.bcast_duplist[curr];
        entry->crc = crc;
        entry->entrytime = jiffies;
        memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
-       bat_priv->bcast_duplist_curr = curr;
+       bat_priv->bla.bcast_duplist_curr = curr;
 
        /* allow it, its the first occurence. */
        return 0;
@@ -1279,7 +1285,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
  */
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
-       struct batadv_hashtable *hash = bat_priv->backbone_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
                        return 0;
 
-               vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
-                                             hdr_size);
+               vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        }
 
@@ -1359,18 +1364,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *primary_if;
 
-       cancel_delayed_work_sync(&bat_priv->bla_work);
+       cancel_delayed_work_sync(&bat_priv->bla.work);
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
-       if (bat_priv->claim_hash) {
+       if (bat_priv->bla.claim_hash) {
                batadv_bla_purge_claims(bat_priv, primary_if, 1);
-               batadv_hash_destroy(bat_priv->claim_hash);
-               bat_priv->claim_hash = NULL;
+               batadv_hash_destroy(bat_priv->bla.claim_hash);
+               bat_priv->bla.claim_hash = NULL;
        }
-       if (bat_priv->backbone_hash) {
+       if (bat_priv->bla.backbone_hash) {
                batadv_bla_purge_backbone_gw(bat_priv, 1);
-               batadv_hash_destroy(bat_priv->backbone_hash);
-               bat_priv->backbone_hash = NULL;
+               batadv_hash_destroy(bat_priv->bla.backbone_hash);
+               bat_priv->bla.backbone_hash = NULL;
        }
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
                goto allow;
 
 
-       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+       if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
@@ -1508,7 +1513,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
-       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+       if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;
@@ -1564,7 +1569,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->claim_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct batadv_claim *claim;
        struct batadv_hard_iface *primary_if;
        struct hlist_node *node;
@@ -1593,7 +1598,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
                   net_dev->name, primary_addr,
-                  ntohs(bat_priv->claim_dest.group));
+                  ntohs(bat_priv->bla.claim_dest.group));
        seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
                   "Client", "VID", "Originator", "CRC");
        for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@ out:
                batadv_hardif_free_ref(primary_if);
        return ret;
 }
+
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+       struct net_device *net_dev = (struct net_device *)seq->private;
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+       struct batadv_backbone_gw *backbone_gw;
+       struct batadv_hard_iface *primary_if;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       int secs, msecs;
+       uint32_t i;
+       bool is_own;
+       int ret = 0;
+       uint8_t *primary_addr;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       if (primary_if->if_status != BATADV_IF_ACTIVE) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - primary interface not active\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       primary_addr = primary_if->net_dev->dev_addr;
+       seq_printf(seq,
+                  "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+                  net_dev->name, primary_addr,
+                  ntohs(bat_priv->bla.claim_dest.group));
+       seq_printf(seq, "   %-17s    %-5s %-9s (%-4s)\n",
+                  "Originator", "VID", "last seen", "CRC");
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+                       msecs = jiffies_to_msecs(jiffies -
+                                                backbone_gw->lasttime);
+                       secs = msecs / 1000;
+                       msecs = msecs % 1000;
+
+                       is_own = batadv_compare_eth(backbone_gw->orig,
+                                                   primary_addr);
+                       if (is_own)
+                               continue;
+
+                       seq_printf(seq,
+                                  " * %pM on % 5d % 4i.%03is (%04x)\n",
+                                  backbone_gw->orig, backbone_gw->vid,
+                                  secs, msecs, backbone_gw->crc);
+               }
+               rcu_read_unlock();
+       }
+out:
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+       return ret;
+}
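
batadv_bla_check_bcast_duplist() above keeps a small ring of recently seen broadcast checksums, scanning from the newest entry and overwriting the oldest slot on a miss. A standalone userspace sketch of the same ring discipline; the timestamp and originator checks of the real code are omitted, and the size and sample values are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DUPLIST_SIZE 16

static uint32_t duplist[DUPLIST_SIZE];
static int duplist_curr;

static bool seen_recently(uint32_t crc)
{
	int i, curr;

	/* newest-first scan starting at the current slot */
	for (i = 0; i < DUPLIST_SIZE; i++) {
		curr = (duplist_curr + i) % DUPLIST_SIZE;
		if (duplist[curr] == crc)
			return true;
	}

	/* not found: overwrite the oldest slot and make it the newest */
	curr = (duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
	duplist[curr] = crc;
	duplist_curr = curr;
	return false;
}

int main(void)
{
	printf("first 0xdead: %d\n", seen_recently(0xdead));	/* 0, new */
	printf("first 0xbeef: %d\n", seen_recently(0xbeef));	/* 0, new */
	printf("again 0xdead: %d\n", seen_recently(0xdead));	/* 1, duplicate */
	return 0;
}
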
index 563cfbf..789cb73 100644 (file)
@@ -27,6 +27,8 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+                                            void *offset);
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, short vid,
-                               bool is_bcast)
+                               struct sk_buff *skb, short vid, bool is_bcast)
 {
        return 0;
 }
@@ -66,6 +67,12 @@ static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
        return 0;
 }
 
+static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+                                                          void *offset)
+{
+       return 0;
+}
+
 static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
                                                 uint8_t *orig)
 {
index 34fbb16..391d4fb 100644 (file)
@@ -267,6 +267,15 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
        return single_open(file, batadv_bla_claim_table_seq_print_text,
                           net_dev);
 }
+
+static int batadv_bla_backbone_table_open(struct inode *inode,
+                                         struct file *file)
+{
+       struct net_device *net_dev = (struct net_device *)inode->i_private;
+       return single_open(file, batadv_bla_backbone_table_seq_print_text,
+                          net_dev);
+}
+
 #endif
 
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@ static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
                        batadv_transtable_global_open);
 #ifdef CONFIG_BATMAN_ADV_BLA
 static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
+                       batadv_bla_backbone_table_open);
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
                        batadv_transtable_local_open);
@@ -316,6 +327,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
        &batadv_debuginfo_transtable_global,
 #ifdef CONFIG_BATMAN_ADV_BLA
        &batadv_debuginfo_bla_claim_table,
+       &batadv_debuginfo_bla_backbone_table,
 #endif
        &batadv_debuginfo_transtable_local,
        &batadv_debuginfo_vis_data,
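
The new bla_backbone_table entry reuses the usual read-only single_open()/seq_file plumbing behind BATADV_DEBUGINFO(). A kernel-style sketch of that generic pattern with hypothetical names; this is the customary boilerplate, not the macro's exact expansion:

static int example_table_show(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = seq->private;

	seq_printf(seq, "table for %s\n", net_dev->name);
	return 0;
}

static int example_table_open(struct inode *inode, struct file *file)
{
	/* bind the show callback and its private data for seq_read() */
	return single_open(file, example_table_show, inode->i_private);
}

static const struct file_operations example_table_fops = {
	.owner   = THIS_MODULE,
	.open    = example_table_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
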
index fc866f2..15d67ab 100644 (file)
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
        struct batadv_gw_node *gw_node;
 
        rcu_read_lock();
-       gw_node = rcu_dereference(bat_priv->curr_gw);
+       gw_node = rcu_dereference(bat_priv->gw.curr_gw);
        if (!gw_node)
                goto out;
 
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *curr_gw_node;
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
 
        if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
                new_gw_node = NULL;
 
-       curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
-       rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+       curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+       rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
 
        if (curr_gw_node)
                batadv_gw_node_free_ref(curr_gw_node);
 
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 }
 
 void batadv_gw_deselect(struct batadv_priv *bat_priv)
 {
-       atomic_set(&bat_priv->gw_reselect, 1);
+       atomic_set(&bat_priv->gw.reselect, 1);
 }
 
 static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        struct hlist_node *node;
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+       uint32_t gw_divisor;
        uint8_t max_tq = 0;
        int down, up;
+       uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
+       gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
+       gw_divisor *= 64;
+
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                if (!atomic_inc_not_zero(&gw_node->refcount))
                        goto next;
 
+               tq_avg = router->tq_avg;
+
                switch (atomic_read(&bat_priv->gw_sel_class)) {
                case 1: /* fast connection */
                        batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
                                                    &down, &up);
 
-                       tmp_gw_factor = (router->tq_avg * router->tq_avg *
-                                        down * 100 * 100) /
-                                        (BATADV_TQ_LOCAL_WINDOW_SIZE *
-                                         BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
+                       tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+                       tmp_gw_factor /= gw_divisor;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
                            ((tmp_gw_factor == max_gw_factor) &&
-                            (router->tq_avg > max_tq))) {
+                            (tq_avg > max_tq))) {
                                if (curr_gw)
                                        batadv_gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                          *     soon as a better gateway appears which has
                          *     $routing_class more tq points)
                          */
-                       if (router->tq_avg > max_tq) {
+                       if (tq_avg > max_tq) {
                                if (curr_gw)
                                        batadv_gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                        break;
                }
 
-               if (router->tq_avg > max_tq)
-                       max_tq = router->tq_avg;
+               if (tq_avg > max_tq)
+                       max_tq = tq_avg;
 
                if (tmp_gw_factor > max_gw_factor)
                        max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+       if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
                goto out;
 
        next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        gw_node->orig_node = orig_node;
        atomic_set(&gw_node->refcount, 1);
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
-       hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
+       hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->orig_node != orig_node)
                        continue;
 
@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
 
        hlist_for_each_entry_safe(gw_node, node, node_tmp,
-                                 &bat_priv->gw_list, list) {
+                                 &bat_priv->gw.list, list) {
                if (((!gw_node->deleted) ||
                     (time_before(jiffies, gw_node->deleted + timeout))) &&
                    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
                batadv_gw_node_free_ref(gw_node);
        }
 
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        /* gw_deselect() needs to acquire the gw_list_lock */
        if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                   primary_if->net_dev->dev_addr, net_dev->name);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
index 282bf6e..d112fd6 100644 (file)
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
 {
        struct batadv_vis_packet *vis_packet;
        struct batadv_hard_iface *primary_if;
+       struct sk_buff *skb;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
 
-       vis_packet = (struct batadv_vis_packet *)
-                               bat_priv->my_vis_info->skb_packet->data;
+       skb = bat_priv->vis.my_info->skb_packet;
+       vis_packet = (struct batadv_vis_packet *)skb->data;
        memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(vis_packet->sender_orig,
               primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        hard_iface->if_num = bat_priv->num_ifaces;
        bat_priv->num_ifaces++;
        hard_iface->if_status = BATADV_IF_INACTIVE;
-       batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+       ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+       if (ret < 0) {
+               bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+               bat_priv->num_ifaces--;
+               hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+               goto err_dev;
+       }
 
        hard_iface->batman_adv_ptype.type = ethertype;
        hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
index 13c88b2..b4aa470 100644 (file)
@@ -58,9 +58,6 @@ static int __init batadv_init(void)
 
        batadv_iv_init();
 
-       /* the name should not be longer than 10 chars - see
-        * http://lwn.net/Articles/23634/
-        */
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
        if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
 
        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
-       spin_lock_init(&bat_priv->tt_changes_list_lock);
-       spin_lock_init(&bat_priv->tt_req_list_lock);
-       spin_lock_init(&bat_priv->tt_roam_list_lock);
-       spin_lock_init(&bat_priv->tt_buff_lock);
-       spin_lock_init(&bat_priv->gw_list_lock);
-       spin_lock_init(&bat_priv->vis_hash_lock);
-       spin_lock_init(&bat_priv->vis_list_lock);
+       spin_lock_init(&bat_priv->tt.changes_list_lock);
+       spin_lock_init(&bat_priv->tt.req_list_lock);
+       spin_lock_init(&bat_priv->tt.roam_list_lock);
+       spin_lock_init(&bat_priv->tt.last_changeset_lock);
+       spin_lock_init(&bat_priv->gw.list_lock);
+       spin_lock_init(&bat_priv->vis.hash_lock);
+       spin_lock_init(&bat_priv->vis.list_lock);
 
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
-       INIT_HLIST_HEAD(&bat_priv->gw_list);
-       INIT_LIST_HEAD(&bat_priv->tt_changes_list);
-       INIT_LIST_HEAD(&bat_priv->tt_req_list);
-       INIT_LIST_HEAD(&bat_priv->tt_roam_list);
+       INIT_HLIST_HEAD(&bat_priv->gw.list);
+       INIT_LIST_HEAD(&bat_priv->tt.changes_list);
+       INIT_LIST_HEAD(&bat_priv->tt.req_list);
+       INIT_LIST_HEAD(&bat_priv->tt.roam_list);
 
        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
@@ -131,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       atomic_set(&bat_priv->gw_reselect, 0);
+       atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
        return 0;
index 5d8fa07..d57b746 100644 (file)
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.3.0"
+#define BATADV_SOURCE_VERSION "2012.4.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
  * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
  */
 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
 #define BATADV_TQ_LOCAL_WINDOW_SIZE 64
-/* miliseconds we have to keep pending tt_req */
+/* milliseconds we have to keep pending tt_req */
 #define BATADV_TT_REQUEST_TIMEOUT 3000
 
 #define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
 #define BATADV_TT_OGM_APPEND_MAX 3
 
 /* Time in which a client can roam at most ROAMING_MAX_COUNT times in
- * miliseconds
+ * milliseconds
  */
 #define BATADV_ROAMING_MAX_TIME 20000
 #define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@ enum batadv_uev_type {
 /* Append 'batman-adv: ' before kernel messages */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-/* all messages related to routing / flooding / broadcasting / etc */
-enum batadv_dbg_level {
-       BATADV_DBG_BATMAN = 1 << 0,
-       BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
-       BATADV_DBG_TT     = 1 << 2, /* translation table operations */
-       BATADV_DBG_BLA    = 1 << 3, /* bridge loop avoidance */
-       BATADV_DBG_ALL    = 15,
-};
-
 /* Kernel headers */
 
 #include <linux/mutex.h>       /* mutex */
@@ -173,6 +165,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
 
+/* all messages related to routing / flooding / broadcasting / etc */
+enum batadv_dbg_level {
+       BATADV_DBG_BATMAN = BIT(0),
+       BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
+       BATADV_DBG_TT     = BIT(2), /* translation table operations */
+       BATADV_DBG_BLA    = BIT(3), /* bridge loop avoidance */
+       BATADV_DBG_ALL    = 15,
+};
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
index 8d3e55a..2d23a14 100644 (file)
@@ -37,10 +37,10 @@ enum batadv_packettype {
 #define BATADV_COMPAT_VERSION 14
 
 enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = 1 << 3,
-       BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
-       BATADV_VIS_SERVER          = 1 << 5,
-       BATADV_DIRECTLINK          = 1 << 6,
+       BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
+       BATADV_PRIMARIES_FIRST_HOP = BIT(4),
+       BATADV_VIS_SERVER          = BIT(5),
+       BATADV_DIRECTLINK          = BIT(6),
 };
 
 /* ICMP message types */
@@ -60,8 +60,8 @@ enum batadv_vis_packettype {
 
 /* fragmentation defines */
 enum batadv_unicast_frag_flags {
-       BATADV_UNI_FRAG_HEAD      = 1 << 0,
-       BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
+       BATADV_UNI_FRAG_HEAD      = BIT(0),
+       BATADV_UNI_FRAG_LARGETAIL = BIT(1),
 };
 
 /* TT_QUERY subtypes */
@@ -74,26 +74,27 @@ enum batadv_tt_query_packettype {
 
 /* TT_QUERY flags */
 enum batadv_tt_query_flags {
-       BATADV_TT_FULL_TABLE = 1 << 2,
+       BATADV_TT_FULL_TABLE = BIT(2),
 };
 
 /* BATADV_TT_CLIENT flags.
- * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only
+ * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
+ * BIT(15) are used for local computation only
  */
 enum batadv_tt_client_flags {
-       BATADV_TT_CLIENT_DEL     = 1 << 0,
-       BATADV_TT_CLIENT_ROAM    = 1 << 1,
-       BATADV_TT_CLIENT_WIFI    = 1 << 2,
-       BATADV_TT_CLIENT_NOPURGE = 1 << 8,
-       BATADV_TT_CLIENT_NEW     = 1 << 9,
-       BATADV_TT_CLIENT_PENDING = 1 << 10,
+       BATADV_TT_CLIENT_DEL     = BIT(0),
+       BATADV_TT_CLIENT_ROAM    = BIT(1),
+       BATADV_TT_CLIENT_WIFI    = BIT(2),
+       BATADV_TT_CLIENT_TEMP    = BIT(3),
+       BATADV_TT_CLIENT_NOPURGE = BIT(8),
+       BATADV_TT_CLIENT_NEW     = BIT(9),
+       BATADV_TT_CLIENT_PENDING = BIT(10),
 };
 
 /* claim frame types for the bridge loop avoidance */
 enum batadv_bla_claimframe {
-       BATADV_CLAIM_TYPE_ADD           = 0x00,
-       BATADV_CLAIM_TYPE_DEL           = 0x01,
+       BATADV_CLAIM_TYPE_CLAIM         = 0x00,
+       BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
        BATADV_CLAIM_TYPE_ANNOUNCE      = 0x02,
        BATADV_CLAIM_TYPE_REQUEST       = 0x03,
 };
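
The flag conversions in this header are purely cosmetic: the kernel's BIT(nr) macro expands to (1UL << (nr)), so every value stays the same. Below is a small user-space check, assuming only the values visible in the hunks above (enum names shortened for the example).

#include <assert.h>
#include <stdio.h>

/* user-space stand-in for the kernel macro: BIT(nr) is (1UL << (nr)) */
#define BIT(nr) (1UL << (nr))

enum tt_client_flags {                  /* values copied from the hunk above */
        TT_CLIENT_DEL     = BIT(0),
        TT_CLIENT_ROAM    = BIT(1),
        TT_CLIENT_WIFI    = BIT(2),
        TT_CLIENT_TEMP    = BIT(3),
        TT_CLIENT_NOPURGE = BIT(8),
        TT_CLIENT_NEW     = BIT(9),
        TT_CLIENT_PENDING = BIT(10),
};

int main(void)
{
        /* BIT(0)..BIT(7) travel on the wire, BIT(8) and above stay local */
        unsigned long wire_mask = BIT(8) - 1;

        assert(TT_CLIENT_DEL  == (1 << 0));
        assert(TT_CLIENT_TEMP == (1 << 3));
        assert((TT_CLIENT_NOPURGE & wire_mask) == 0);   /* never sent on the wire */
        printf("flag values unchanged by the BIT() conversion\n");
        return 0;
}
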
index bc2b88b..939fc01 100644 (file)
@@ -579,32 +579,45 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
        return router;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
 {
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_tt_query_packet *tt_query;
-       uint16_t tt_size;
        struct ethhdr *ethhdr;
-       char tt_flag;
-       size_t packet_size;
 
        /* drop packet if it has not necessary minimum size */
-       if (unlikely(!pskb_may_pull(skb,
-                                   sizeof(struct batadv_tt_query_packet))))
-               goto out;
-
-       /* I could need to modify it */
-       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-               goto out;
+       if (unlikely(!pskb_may_pull(skb, hdr_size)))
+               return -1;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
-               goto out;
+               return -1;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
+               return -1;
+
+       /* not for me */
+       if (!batadv_is_my_mac(ethhdr->h_dest))
+               return -1;
+
+       return 0;
+}
+
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_tt_query_packet *tt_query;
+       uint16_t tt_size;
+       int hdr_size = sizeof(*tt_query);
+       char tt_flag;
+       size_t packet_size;
+
+       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+               return NET_RX_DROP;
+
+       /* I could need to modify it */
+       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
                goto out;
 
        tt_query = (struct batadv_tt_query_packet *)skb->data;
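
The batadv_check_unicast_packet() helper factored out above performs four plain Ethernet-header sanity checks before any unicast payload is processed. A hedged user-space sketch of the same logic follows, using a simplified header and buffer length instead of the kernel's struct ethhdr and skb helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct eth_hdr {                        /* simplified header, not the kernel's struct ethhdr */
        uint8_t h_dest[ETH_ALEN];
        uint8_t h_source[ETH_ALEN];
        uint16_t h_proto;
};

static bool is_broadcast_ether_addr(const uint8_t *addr)
{
        static const uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        return memcmp(addr, bcast, ETH_ALEN) == 0;
}

/* the same four tests as the new helper: enough data for the batman header,
 * no broadcast recipient or sender on a unicast frame, and the frame must be
 * addressed to our own MAC
 */
static int check_unicast_packet(const struct eth_hdr *eth, size_t payload_len,
                                size_t hdr_size, const uint8_t *my_mac)
{
        if (payload_len < hdr_size)
                return -1;
        if (is_broadcast_ether_addr(eth->h_dest))
                return -1;
        if (is_broadcast_ether_addr(eth->h_source))
                return -1;
        if (memcmp(eth->h_dest, my_mac, ETH_ALEN) != 0)
                return -1;
        return 0;
}

int main(void)
{
        const uint8_t my_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        struct eth_hdr eth = { .h_proto = 0 };

        memcpy(eth.h_dest, my_mac, ETH_ALEN);
        memset(eth.h_source, 0xff, ETH_ALEN);   /* broadcast sender: must be rejected */

        printf("%d\n", check_unicast_packet(&eth, 64, 10, my_mac));  /* prints -1 */
        return 0;
}
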
@@ -721,7 +734,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
         * been incremented yet. This flag will make me check all the incoming
         * packets for the correct destination.
         */
-       bat_priv->tt_poss_change = true;
+       bat_priv->tt.poss_change = true;
 
        batadv_orig_node_free_ref(orig_node);
 out:
@@ -819,31 +832,6 @@ err:
        return NULL;
 }
 
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
-       struct ethhdr *ethhdr;
-
-       /* drop packet if it has not necessary minimum size */
-       if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return -1;
-
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-       /* packet with unicast indication but broadcast recipient */
-       if (is_broadcast_ether_addr(ethhdr->h_dest))
-               return -1;
-
-       /* packet with broadcast sender address */
-       if (is_broadcast_ether_addr(ethhdr->h_source))
-               return -1;
-
-       /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
-               return -1;
-
-       return 0;
-}
-
 static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if)
 {
@@ -947,8 +935,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        if (batadv_is_my_mac(unicast_packet->dest)) {
-               tt_poss_change = bat_priv->tt_poss_change;
-               curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+               tt_poss_change = bat_priv->tt.poss_change;
+               curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        } else {
                orig_node = batadv_orig_hash_find(bat_priv,
                                                  unicast_packet->dest);
@@ -993,8 +981,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                } else {
                        memcpy(unicast_packet->dest, orig_node->orig,
                               ETH_ALEN);
-                       curr_ttvn = (uint8_t)
-                               atomic_read(&orig_node->last_ttvn);
+                       curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
                        batadv_orig_node_free_ref(orig_node);
                }
 
@@ -1025,8 +1012,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 
        /* packet for me */
        if (batadv_is_my_mac(unicast_packet->dest)) {
-               batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
-                                   hdr_size);
+               batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+                                   NULL);
+
                return NET_RX_SUCCESS;
        }
 
@@ -1063,7 +1051,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
                        return NET_RX_SUCCESS;
 
                batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-                                   sizeof(struct batadv_unicast_packet));
+                                   sizeof(struct batadv_unicast_packet), NULL);
                return NET_RX_SUCCESS;
        }
 
@@ -1150,7 +1138,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
                goto out;
 
        /* broadcast for me */
-       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+                           orig_node);
        ret = NET_RX_SUCCESS;
        goto out;
 
index 3b4b2da..570a8bc 100644 (file)
@@ -190,13 +190,13 @@ out:
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
        struct batadv_hard_iface *hard_iface;
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;
 
+       delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@ out:
 
 void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;
 
+       delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
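
The two hunks above only move the container_of() call out of the variable initializer; the idiom itself recovers the enclosing structure from a pointer to an embedded member. Here is a minimal user-space illustration, where the *_stub types are made up for the example.

#include <stddef.h>
#include <stdio.h>

/* user-space equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work_stub {              /* stands in for struct delayed_work */
        int pending;
};

struct forw_packet_stub {               /* stands in for struct batadv_forw_packet */
        int num_packets;
        struct delayed_work_stub delayed_work;
};

int main(void)
{
        struct forw_packet_stub fp = { .num_packets = 3 };
        struct delayed_work_stub *work = &fp.delayed_work;

        /* given only the embedded member, recover the enclosing structure,
         * just as the workqueue callbacks above recover their forw_packet
         */
        struct forw_packet_stub *owner =
                container_of(work, struct forw_packet_stub, delayed_work);

        printf("%d\n", owner->num_packets);     /* prints 3 */
        return 0;
}
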
index 109ea2a..7b683e0 100644 (file)
@@ -93,7 +93,14 @@ static int batadv_interface_release(struct net_device *dev)
 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
-       return &bat_priv->stats;
+       struct net_device_stats *stats = &bat_priv->stats;
+
+       stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
+       stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
+       stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+       stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
+       stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
+       return stats;
 }
 
 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
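
batadv_interface_stats() now folds the per-CPU counters into the netdev stats because writers only ever touch their own CPU's slot of the block that __alloc_percpu() hands out. A user-space sketch of that read-side summation follows; the array size, counter names and helpers are illustrative, not the batadv_sum_counter() implementation.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4                       /* illustrative, not the kernel's CPU count */
enum { CNT_TX, CNT_TX_BYTES, CNT_RX, CNT_RX_BYTES, CNT_NUM };

/* one counter block per CPU, as the per-CPU allocation in the patch provides */
static uint64_t counters[NR_CPUS][CNT_NUM];

/* writers touch only their own CPU's slot ... */
static void count_add(int cpu, int idx, uint64_t amount)
{
        counters[cpu][idx] += amount;
}

/* ... so a reader such as the stats callback has to sum every slot,
 * which is what the batadv_sum_counter() calls above do
 */
static uint64_t count_sum(int idx)
{
        uint64_t sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                sum += counters[cpu][idx];
        return sum;
}

int main(void)
{
        count_add(0, CNT_TX, 1);
        count_add(2, CNT_TX, 1);
        count_add(2, CNT_TX_BYTES, 1500);
        printf("tx=%llu tx_bytes=%llu\n",
               (unsigned long long)count_sum(CNT_TX),
               (unsigned long long)count_sum(CNT_TX_BYTES));
        return 0;
}
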
@@ -142,6 +149,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        int data_len = skb->len, ret;
        short vid __maybe_unused = -1;
        bool do_bcast = false;
+       uint32_t seqno;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
@@ -223,8 +231,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
                       primary_if->net_dev->dev_addr, ETH_ALEN);
 
                /* set broadcast sequence number */
-               bcast_packet->seqno =
-                       htonl(atomic_inc_return(&bat_priv->bcast_seqno));
+               seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+               bcast_packet->seqno = htonl(seqno);
 
                batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 
@@ -246,14 +254,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
                        goto dropped_freed;
        }
 
-       bat_priv->stats.tx_packets++;
-       bat_priv->stats.tx_bytes += data_len;
+       batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+       batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
        goto end;
 
 dropped:
        kfree_skb(skb);
 dropped_freed:
-       bat_priv->stats.tx_dropped++;
+       batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
 end:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -262,7 +270,7 @@ end:
 
 void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-                        int hdr_size)
+                        int hdr_size, struct batadv_orig_node *orig_node)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct ethhdr *ethhdr;
@@ -308,11 +316,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
 
-       bat_priv->stats.rx_packets++;
-       bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+       batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                          skb->len + ETH_HLEN);
 
        soft_iface->last_rx = jiffies;
 
+       if (orig_node)
+               batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
+                                                    ethhdr->h_source);
+
        if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
                goto dropped;
 
@@ -379,15 +392,22 @@ struct net_device *batadv_softif_create(const char *name)
        if (!soft_iface)
                goto out;
 
+       bat_priv = netdev_priv(soft_iface);
+
+       /* batadv_interface_stats() needs to be available as soon as
+        * register_netdevice() has been called
+        */
+       bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+       if (!bat_priv->bat_counters)
+               goto free_soft_iface;
+
        ret = register_netdevice(soft_iface);
        if (ret < 0) {
                pr_err("Unable to register the batman interface '%s': %i\n",
                       name, ret);
-               goto free_soft_iface;
+               goto free_bat_counters;
        }
 
-       bat_priv = netdev_priv(soft_iface);
-
        atomic_set(&bat_priv->aggregated_ogms, 1);
        atomic_set(&bat_priv->bonding, 0);
        atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -405,29 +425,26 @@ struct net_device *batadv_softif_create(const char *name)
 
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
        atomic_set(&bat_priv->bcast_seqno, 1);
-       atomic_set(&bat_priv->ttvn, 0);
-       atomic_set(&bat_priv->tt_local_changes, 0);
-       atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
-       atomic_set(&bat_priv->bla_num_requests, 0);
-
-       bat_priv->tt_buff = NULL;
-       bat_priv->tt_buff_len = 0;
-       bat_priv->tt_poss_change = false;
+       atomic_set(&bat_priv->tt.vn, 0);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+       atomic_set(&bat_priv->bla.num_requests, 0);
+#endif
+       bat_priv->tt.last_changeset = NULL;
+       bat_priv->tt.last_changeset_len = 0;
+       bat_priv->tt.poss_change = false;
 
        bat_priv->primary_if = NULL;
        bat_priv->num_ifaces = 0;
 
-       bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
-       if (!bat_priv->bat_counters)
-               goto unreg_soft_iface;
-
        ret = batadv_algo_select(bat_priv, batadv_routing_algo);
        if (ret < 0)
-               goto free_bat_counters;
+               goto unreg_soft_iface;
 
        ret = batadv_sysfs_add_meshif(soft_iface);
        if (ret < 0)
-               goto free_bat_counters;
+               goto unreg_soft_iface;
 
        ret = batadv_debugfs_add_meshif(soft_iface);
        if (ret < 0)
@@ -443,12 +460,13 @@ unreg_debugfs:
        batadv_debugfs_del_meshif(soft_iface);
 unreg_sysfs:
        batadv_sysfs_del_meshif(soft_iface);
-free_bat_counters:
-       free_percpu(bat_priv->bat_counters);
 unreg_soft_iface:
+       free_percpu(bat_priv->bat_counters);
        unregister_netdevice(soft_iface);
        return NULL;
 
+free_bat_counters:
+       free_percpu(bat_priv->bat_counters);
 free_soft_iface:
        free_netdev(soft_iface);
 out:
@@ -518,6 +536,11 @@ static u32 batadv_get_link(struct net_device *dev)
 static const struct {
        const char name[ETH_GSTRING_LEN];
 } batadv_counters_strings[] = {
+       { "tx" },
+       { "tx_bytes" },
+       { "tx_dropped" },
+       { "rx" },
+       { "rx_bytes" },
        { "forward" },
        { "forward_bytes" },
        { "mgmt_tx" },
index 852c683..07a08fe 100644 (file)
@@ -21,8 +21,9 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
-void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
-                        struct batadv_hard_iface *recv_if, int hdr_size);
+void batadv_interface_rx(struct net_device *soft_iface,
+                        struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+                        int hdr_size, struct batadv_orig_node *orig_node);
 struct net_device *batadv_softif_create(const char *name);
 void batadv_softif_destroy(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
index 99dd8f7..112edd3 100644 (file)
@@ -34,6 +34,10 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
 static void batadv_tt_purge(struct work_struct *work);
 static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+                                struct batadv_orig_node *orig_node,
+                                const unsigned char *addr,
+                                const char *message, bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 
 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
+       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
                           msecs_to_jiffies(5000));
 }
 
@@ -88,7 +92,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
        if (tt_common_entry)
                tt_local_entry = container_of(tt_common_entry,
                                              struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
        if (tt_common_entry)
                tt_global_entry = container_of(tt_common_entry,
                                               struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
+       if (!atomic_dec_and_test(&orig_entry->refcount))
+               return;
        /* to avoid race conditions, immediately decrease the tt counter */
        atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
@@ -175,8 +181,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
        /* check for ADD+DEL or DEL+ADD events */
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (!batadv_compare_eth(entry->change.addr, addr))
                        continue;
@@ -203,15 +209,15 @@ del:
        }
 
        /* track the change in the OGMinterval list */
-       list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+       list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
        if (event_removed)
-               atomic_dec(&bat_priv->tt_local_changes);
+               atomic_dec(&bat_priv->tt.local_changes);
        else
-               atomic_inc(&bat_priv->tt_local_changes);
+               atomic_inc(&bat_priv->tt.local_changes);
 }
 
 int batadv_tt_len(int changes_num)
@@ -221,12 +227,12 @@ int batadv_tt_len(int changes_num)
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
 {
-       if (bat_priv->tt_local_hash)
+       if (bat_priv->tt.local_hash)
                return 0;
 
-       bat_priv->tt_local_hash = batadv_hash_new(1024);
+       bat_priv->tt.local_hash = batadv_hash_new(1024);
 
-       if (!bat_priv->tt_local_hash)
+       if (!bat_priv->tt.local_hash)
                return -ENOMEM;
 
        return 0;
@@ -258,7 +264,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
-                  (uint8_t)atomic_read(&bat_priv->ttvn));
+                  (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
        tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -266,6 +272,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
        atomic_set(&tt_local_entry->common.refcount, 2);
        tt_local_entry->last_seen = jiffies;
+       tt_local_entry->common.added_at = tt_local_entry->last_seen;
 
        /* the batman interface mac address should never be purged */
        if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -277,7 +284,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
         */
        tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
 
-       hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+       hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
                                     batadv_choose_orig,
                                     &tt_local_entry->common,
                                     &tt_local_entry->common.hash_entry);
@@ -348,7 +355,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
        req_len = min_packet_len;
-       req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+       req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
 
        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented
@@ -381,10 +388,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
        if (new_len > 0)
                tot_changes = new_len / batadv_tt_len(1);
 
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
-       atomic_set(&bat_priv->tt_local_changes, 0);
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       atomic_set(&bat_priv->tt.local_changes, 0);
 
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (count < tot_changes) {
                        memcpy(tt_buff + batadv_tt_len(count),
@@ -394,25 +401,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
                list_del(&entry->list);
                kfree(entry);
        }
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
        /* Keep the buffer for possible tt_request */
-       spin_lock_bh(&bat_priv->tt_buff_lock);
-       kfree(bat_priv->tt_buff);
-       bat_priv->tt_buff_len = 0;
-       bat_priv->tt_buff = NULL;
+       spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+       kfree(bat_priv->tt.last_changeset);
+       bat_priv->tt.last_changeset_len = 0;
+       bat_priv->tt.last_changeset = NULL;
        /* check whether this new OGM has no changes due to size problems */
        if (new_len > 0) {
                /* if kmalloc() fails we will reply with the full table
                 * instead of providing the diff
                 */
-               bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
-               if (bat_priv->tt_buff) {
-                       memcpy(bat_priv->tt_buff, tt_buff, new_len);
-                       bat_priv->tt_buff_len = new_len;
+               bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+               if (bat_priv->tt.last_changeset) {
+                       memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
+                       bat_priv->tt.last_changeset_len = new_len;
                }
        }
-       spin_unlock_bh(&bat_priv->tt_buff_lock);
+       spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
        return count;
 }
@@ -421,7 +428,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_hard_iface *primary_if;
        struct hlist_node *node;
@@ -446,7 +453,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
        seq_printf(seq,
                   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-                  net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
+                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -544,7 +551,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 
 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
@@ -570,10 +577,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        uint32_t i;
 
-       if (!bat_priv->tt_local_hash)
+       if (!bat_priv->tt.local_hash)
                return;
 
-       hash = bat_priv->tt_local_hash;
+       hash = bat_priv->tt.local_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -593,17 +600,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 
        batadv_hash_destroy(hash);
 
-       bat_priv->tt_local_hash = NULL;
+       bat_priv->tt.local_hash = NULL;
 }
 
 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
 {
-       if (bat_priv->tt_global_hash)
+       if (bat_priv->tt.global_hash)
                return 0;
 
-       bat_priv->tt_global_hash = batadv_hash_new(1024);
+       bat_priv->tt.global_hash = batadv_hash_new(1024);
 
-       if (!bat_priv->tt_global_hash)
+       if (!bat_priv->tt.global_hash)
                return -ENOMEM;
 
        return 0;
@@ -613,62 +620,99 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_change_node *entry, *safe;
 
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
 
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                list_del(&entry->list);
                kfree(entry);
        }
 
-       atomic_set(&bat_priv->tt_local_changes, 0);
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
-/* find out if an orig_node is already in the list of a tt_global_entry.
- * returns 1 if found, 0 otherwise
+/* retrieves the orig_tt_list_entry belonging to orig_node from the
+ * batadv_tt_global_entry list
+ *
+ * returns it with an increased refcounter, NULL if not found
  */
-static bool
-batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
-                               const struct batadv_orig_node *orig_node)
+static struct batadv_tt_orig_list_entry *
+batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+                                const struct batadv_orig_node *orig_node)
 {
-       struct batadv_tt_orig_list_entry *tmp_orig_entry;
+       struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
        const struct hlist_head *head;
        struct hlist_node *node;
-       bool found = false;
 
        rcu_read_lock();
        head = &entry->orig_list;
        hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
-               if (tmp_orig_entry->orig_node == orig_node) {
-                       found = true;
-                       break;
-               }
+               if (tmp_orig_entry->orig_node != orig_node)
+                       continue;
+               if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
+                       continue;
+
+               orig_entry = tmp_orig_entry;
+               break;
        }
        rcu_read_unlock();
+
+       return orig_entry;
+}
+
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+                               const struct batadv_orig_node *orig_node)
+{
+       struct batadv_tt_orig_list_entry *orig_entry;
+       bool found = false;
+
+       orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+       if (orig_entry) {
+               found = true;
+               batadv_tt_orig_list_entry_free_ref(orig_entry);
+       }
+
        return found;
 }
 
 static void
-batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
                                struct batadv_orig_node *orig_node, int ttvn)
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
+       orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+       if (orig_entry) {
+               /* refresh the ttvn: the current value could be a bogus one that
+                * was added during a "temporary client detection"
+                */
+               orig_entry->ttvn = ttvn;
+               goto out;
+       }
+
        orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
        if (!orig_entry)
-               return;
+               goto out;
 
        INIT_HLIST_NODE(&orig_entry->list);
        atomic_inc(&orig_node->refcount);
        atomic_inc(&orig_node->tt_size);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
+       atomic_set(&orig_entry->refcount, 2);
 
-       spin_lock_bh(&tt_global_entry->list_lock);
+       spin_lock_bh(&tt_global->list_lock);
        hlist_add_head_rcu(&orig_entry->list,
-                          &tt_global_entry->orig_list);
-       spin_unlock_bh(&tt_global_entry->list_lock);
+                          &tt_global->orig_list);
+       spin_unlock_bh(&tt_global->list_lock);
+out:
+       if (orig_entry)
+               batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
 
 /* caller must hold orig_node refcount */
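
The lookup above only returns an orig_list entry after atomic_inc_not_zero() succeeds, and batadv_tt_orig_list_entry_free_ref() now releases the entry when the last reference is dropped. Below is a user-space sketch of that get/put pattern with C11 atomics; the struct and function names are illustrative, and RCU deferral is left out.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct orig_entry {                     /* stand-in for batadv_tt_orig_list_entry */
        atomic_int refcount;
        int ttvn;
};

/* "get": take a reference only while the object is still live (refcount > 0),
 * the user-space analogue of atomic_inc_not_zero() in the lookup above
 */
static bool entry_get(struct orig_entry *e)
{
        int old = atomic_load(&e->refcount);

        while (old != 0)
                if (atomic_compare_exchange_weak(&e->refcount, &old, old + 1))
                        return true;
        return false;
}

/* "put": drop a reference and free on the last one, the analogue of the
 * atomic_dec_and_test() check added to the free_ref helper
 */
static void entry_put(struct orig_entry *e)
{
        if (atomic_fetch_sub(&e->refcount, 1) == 1)
                free(e);
}

int main(void)
{
        struct orig_entry *e = malloc(sizeof(*e));

        if (!e)
                return 1;
        atomic_init(&e->refcount, 2);   /* one reference for the list, one for the creator */
        e->ttvn = 5;

        if (entry_get(e))               /* a lookup succeeds while refcount > 0 ... */
                entry_put(e);           /* ... and releases its reference again */

        entry_put(e);                   /* creator's reference */
        entry_put(e);                   /* list reference: the last put frees the entry */
        return 0;
}
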
@@ -695,11 +739,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                common->flags = flags;
                tt_global_entry->roam_at = 0;
                atomic_set(&common->refcount, 2);
+               common->added_at = jiffies;
 
                INIT_HLIST_HEAD(&tt_global_entry->orig_list);
                spin_lock_init(&tt_global_entry->list_lock);
 
-               hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+               hash_added = batadv_hash_add(bat_priv->tt.global_hash,
                                             batadv_compare_tt,
                                             batadv_choose_orig, common,
                                             &common->hash_entry);
@@ -709,11 +754,20 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                        batadv_tt_global_entry_free_ref(tt_global_entry);
                        goto out_remove;
                }
-
-               batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
-                                               ttvn);
        } else {
-               /* there is already a global entry, use this one. */
+               /* If there is already a global entry, we can use this one for
+                * our processing.
+                * But if we are trying to add a temporary client we can exit
+                * directly because the temporary information should never
+                * override any already known client state (whatever it is)
+                */
+               if (flags & BATADV_TT_CLIENT_TEMP)
+                       goto out;
+
+               /* if the client was temporarily added before receiving the first
+                * OGM announcing it, we have to clear the TEMP flag
+                */
+               tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
@@ -727,12 +781,9 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                        tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
                        tt_global_entry->roam_at = 0;
                }
-
-               if (!batadv_tt_global_entry_has_orig(tt_global_entry,
-                                                    orig_node))
-                       batadv_tt_global_add_orig_entry(tt_global_entry,
-                                                       orig_node, ttvn);
        }
+       /* add the new orig_entry (if needed) or update it */
+       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new global tt entry: %pM (via %pM)\n",
@@ -771,11 +822,12 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
        hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                flags = tt_common_entry->flags;
                last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
                           tt_global_entry->common.addr, orig_entry->ttvn,
                           orig_entry->orig_node->orig, last_ttvn,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-                          (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+                          (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+                          (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
        }
 }
 
@@ -783,7 +835,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global;
        struct batadv_hard_iface *primary_if;
@@ -884,7 +936,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
                   "Deleting global tt entry %pM: %s\n",
                   tt_global_entry->common.addr, message);
 
-       batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
                           batadv_choose_orig, tt_global_entry->common.addr);
        batadv_tt_global_entry_free_ref(tt_global_entry);
 
@@ -995,7 +1047,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        struct batadv_tt_global_entry *tt_global;
        struct batadv_tt_common_entry *tt_common_entry;
        uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_node *node, *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1030,49 +1082,63 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        orig_node->tt_initialised = false;
 }
 
-static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
-                                            struct hlist_head *head)
+static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+                                     char **msg)
 {
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_global_entry *tt_global_entry;
-       struct hlist_node *node, *node_tmp;
-
-       hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
-                                 hash_entry) {
-               tt_global_entry = container_of(tt_common_entry,
-                                              struct batadv_tt_global_entry,
-                                              common);
-               if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
-                       continue;
-               if (!batadv_has_timed_out(tt_global_entry->roam_at,
-                                         BATADV_TT_CLIENT_ROAM_TIMEOUT))
-                       continue;
+       bool purge = false;
+       unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
+       unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
 
-               batadv_dbg(BATADV_DBG_TT, bat_priv,
-                          "Deleting global tt entry (%pM): Roaming timeout\n",
-                          tt_global_entry->common.addr);
+       if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
+           batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
+               purge = true;
+               *msg = "Roaming timeout\n";
+       }
 
-               hlist_del_rcu(node);
-               batadv_tt_global_entry_free_ref(tt_global_entry);
+       if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
+           batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
+               purge = true;
+               *msg = "Temporary client timeout\n";
        }
+
+       return purge;
 }
 
-static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_head *head;
+       struct hlist_node *node, *node_tmp;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
+       char *msg = NULL;
+       struct batadv_tt_common_entry *tt_common;
+       struct batadv_tt_global_entry *tt_global;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               batadv_tt_global_roam_purge_list(bat_priv, head);
+               hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+                                         hash_entry) {
+                       tt_global = container_of(tt_common,
+                                                struct batadv_tt_global_entry,
+                                                common);
+
+                       if (!batadv_tt_global_to_purge(tt_global, &msg))
+                               continue;
+
+                       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                                  "Deleting global tt entry (%pM): %s\n",
+                                  tt_global->common.addr, msg);
+
+                       hlist_del_rcu(node);
+
+                       batadv_tt_global_entry_free_ref(tt_global);
+               }
                spin_unlock_bh(list_lock);
        }
-
 }
 
 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
@@ -1085,10 +1151,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        uint32_t i;
 
-       if (!bat_priv->tt_global_hash)
+       if (!bat_priv->tt.global_hash)
                return;
 
-       hash = bat_priv->tt_global_hash;
+       hash = bat_priv->tt.global_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1108,7 +1174,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 
        batadv_hash_destroy(hash);
 
-       bat_priv->tt_global_hash = NULL;
+       bat_priv->tt.global_hash = NULL;
 }
 
 static bool
@@ -1187,7 +1253,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                     struct batadv_orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_node *node;
@@ -1210,6 +1276,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                         */
                        if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
                                continue;
+                       /* Temporary clients have not been announced yet, so
+                        * they have to be skipped while computing the global
+                        * crc
+                        */
+                       if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
+                               continue;
 
                        /* find out if this global entry is announced by this
                         * originator
@@ -1234,7 +1306,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 {
        uint16_t total = 0, total_one;
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct hlist_node *node;
        struct hlist_head *head;
@@ -1267,14 +1339,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                list_del(&node->list);
                kfree(node);
        }
 
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1304,15 +1376,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
                        list_del(&node->list);
                        kfree(node);
                }
        }
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 /* returns the pointer to the new tt_req_node struct if no request
@@ -1324,8 +1396,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
                if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
                    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
                                          BATADV_TT_REQUEST_TIMEOUT))
@@ -1339,9 +1411,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
        memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
        tt_req_node->issued_at = jiffies;
 
-       list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+       list_add(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
        return tt_req_node;
 }
 
@@ -1363,7 +1435,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
        const struct batadv_tt_global_entry *tt_global_entry;
        const struct batadv_orig_node *orig_node = data_ptr;
 
-       if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
+       if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
+           tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
                return 0;
 
        tt_global_entry = container_of(tt_common_entry,
@@ -1507,9 +1580,9 @@ out:
        if (ret)
                kfree_skb(skb);
        if (ret && tt_req_node) {
-               spin_lock_bh(&bat_priv->tt_req_list_lock);
+               spin_lock_bh(&bat_priv->tt.req_list_lock);
                list_del(&tt_req_node->list);
-               spin_unlock_bh(&bat_priv->tt_req_list_lock);
+               spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
        return ret;
@@ -1530,6 +1603,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
        uint16_t tt_len, tt_tot;
        struct sk_buff *skb = NULL;
        struct batadv_tt_query_packet *tt_response;
+       uint8_t *packet_pos;
        size_t len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1583,8 +1657,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                        goto unlock;
 
                skb_reserve(skb, ETH_HLEN);
-               tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-                                                                      len);
+               packet_pos = skb_put(skb, len);
+               tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
                tt_response->tt_data = htons(tt_tot);
 
@@ -1600,7 +1674,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
 
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt_global_hash,
+                                                   bat_priv->tt.global_hash,
                                                    primary_if,
                                                    batadv_tt_global_valid,
                                                    req_dst_orig_node);
@@ -1663,6 +1737,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        uint16_t tt_len, tt_tot;
        struct sk_buff *skb = NULL;
        struct batadv_tt_query_packet *tt_response;
+       uint8_t *packet_pos;
        size_t len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1671,7 +1746,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 
-       my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+       my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        req_ttvn = tt_request->ttvn;
 
        orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1690,7 +1765,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         * is too big send the whole local translation table
         */
        if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
-           !bat_priv->tt_buff)
+           !bat_priv->tt.last_changeset)
                full_table = true;
        else
                full_table = false;
@@ -1699,8 +1774,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         * I'll send only one packet with as many TT entries as I can
         */
        if (!full_table) {
-               spin_lock_bh(&bat_priv->tt_buff_lock);
-               tt_len = bat_priv->tt_buff_len;
+               spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+               tt_len = bat_priv->tt.last_changeset_len;
                tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
                len = sizeof(*tt_response) + tt_len;
@@ -1709,22 +1784,22 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                        goto unlock;
 
                skb_reserve(skb, ETH_HLEN);
-               tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-                                                                      len);
+               packet_pos = skb_put(skb, len);
+               tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
                tt_response->tt_data = htons(tt_tot);
 
                tt_buff = skb->data + sizeof(*tt_response);
-               memcpy(tt_buff, bat_priv->tt_buff,
-                      bat_priv->tt_buff_len);
-               spin_unlock_bh(&bat_priv->tt_buff_lock);
+               memcpy(tt_buff, bat_priv->tt.last_changeset,
+                      bat_priv->tt.last_changeset_len);
+               spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+               tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
                tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+               ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt_local_hash,
+                                                   bat_priv->tt.local_hash,
                                                    primary_if,
                                                    batadv_tt_local_valid_entry,
                                                    NULL);
@@ -1756,7 +1831,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        goto out;
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_buff_lock);
+       spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
@@ -1909,14 +1984,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
        }
 
        /* Delete the tt_req_node from pending tt_requests list */
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (!batadv_compare_eth(node->addr, tt_response->src))
                        continue;
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
        /* Recalculate the CRC for this orig_node and store it */
        orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1950,22 +2025,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
 
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                list_del(&node->list);
                kfree(node);
        }
 
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                if (!batadv_has_timed_out(node->first_time,
                                          BATADV_ROAMING_MAX_TIME))
                        continue;
@@ -1973,7 +2048,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 /* This function checks whether the client already reached the
@@ -1988,11 +2063,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
        struct batadv_tt_roam_node *tt_roam_node;
        bool ret = false;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
        /* The new tt_req will be issued only if I'm not waiting for a
         * reply from the same orig_node yet
         */
-       list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+       list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
                if (!batadv_compare_eth(tt_roam_node->addr, client))
                        continue;
 
@@ -2017,12 +2092,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
                           BATADV_ROAMING_MAX_COUNT - 1);
                memcpy(tt_roam_node->addr, client, ETH_ALEN);
 
-               list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+               list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
                ret = true;
        }
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
        return ret;
 }
 
@@ -2086,13 +2161,15 @@ out:
 static void batadv_tt_purge(struct work_struct *work)
 {
        struct delayed_work *delayed_work;
+       struct batadv_priv_tt *priv_tt;
        struct batadv_priv *bat_priv;
 
        delayed_work = container_of(work, struct delayed_work, work);
-       bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
+       priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+       bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
        batadv_tt_local_purge(bat_priv);
-       batadv_tt_global_roam_purge(bat_priv);
+       batadv_tt_global_purge(bat_priv);
        batadv_tt_req_purge(bat_priv);
        batadv_tt_roam_purge(bat_priv);
 
@@ -2101,7 +2178,7 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
-       cancel_delayed_work_sync(&bat_priv->tt_work);
+       cancel_delayed_work_sync(&bat_priv->tt.work);
 
        batadv_tt_local_table_free(bat_priv);
        batadv_tt_global_table_free(bat_priv);
@@ -2109,7 +2186,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
        batadv_tt_changes_list_free(bat_priv);
        batadv_tt_roam_list_free(bat_priv);
 
-       kfree(bat_priv->tt_buff);
+       kfree(bat_priv->tt.last_changeset);
 }
 
 /* This function will enable or disable the specified flags for all the entries
@@ -2153,7 +2230,7 @@ out:
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
        struct hlist_node *node, *node_tmp;
@@ -2178,7 +2255,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                   "Deleting local tt entry (%pM): pending\n",
                                   tt_common->addr);
 
-                       atomic_dec(&bat_priv->num_local_tt);
+                       atomic_dec(&bat_priv->tt.local_entry_num);
                        hlist_del_rcu(node);
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
@@ -2196,26 +2273,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
 {
        uint16_t changed_num = 0;
 
-       if (atomic_read(&bat_priv->tt_local_changes) < 1)
+       if (atomic_read(&bat_priv->tt.local_changes) < 1)
                return -ENOENT;
 
-       changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+       changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
                                          BATADV_TT_CLIENT_NEW, false);
 
        /* all reset entries have to be counted as local entries */
-       atomic_add(changed_num, &bat_priv->num_local_tt);
+       atomic_add(changed_num, &bat_priv->tt.local_entry_num);
        batadv_tt_local_purge_pending_clients(bat_priv);
-       bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
+       bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
-       atomic_inc(&bat_priv->ttvn);
+       atomic_inc(&bat_priv->tt.vn);
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Local changes committed, updating to ttvn %u\n",
-                  (uint8_t)atomic_read(&bat_priv->ttvn));
-       bat_priv->tt_poss_change = false;
+                  (uint8_t)atomic_read(&bat_priv->tt.vn));
+       bat_priv->tt.poss_change = false;
 
        /* reset the sending counter */
-       atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
 
        return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
                                           packet_buff_len, packet_min_len);
@@ -2235,7 +2312,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
 
        /* if the changes have been sent often enough */
        if ((tt_num_changes < 0) &&
-           (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+           (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
                batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
                                              packet_min_len, packet_min_len);
                tt_num_changes = 0;
@@ -2366,3 +2443,22 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
 out:
        return ret;
 }
+
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig_node,
+                                         const unsigned char *addr)
+{
+       bool ret = false;
+
+       if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+                                 BATADV_TT_CLIENT_TEMP,
+                                 atomic_read(&orig_node->last_ttvn)))
+               goto out;
+
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Added temporary global client (addr: %pM orig: %pM)\n",
+                  addr, orig_node->orig);
+       ret = true;
+out:
+       return ret;
+}
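
The batadv_tt_add_temporary_global_entry() helper above lets a caller learn a client that has never been announced, flagging it BATADV_TT_CLIENT_TEMP so that the periodic purge work can drop it again if no regular TT announcement confirms it. A minimal sketch of a hypothetical caller follows; the function name and the surrounding receive-path logic are illustrative assumptions, not part of this patch.

static void example_learn_temp_client(struct batadv_priv *bat_priv,
                                      struct batadv_orig_node *orig_node,
                                      const unsigned char *src_addr)
{
        /* provisionally map src_addr to orig_node; if the addition is
         * rejected there is nothing else to do here
         */
        if (!batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
                                                  src_addr))
                return;

        /* frames towards src_addr can now be routed via orig_node until
         * the next TT request/response settles the authoritative state
         */
}
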
index ffa8735..811fffd 100644 (file)
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
                          int packet_min_len);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
                                        uint8_t *addr);
-
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig_node,
+                                         const unsigned char *addr);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
index 12635fd..2ed82ca 100644 (file)
@@ -145,6 +145,11 @@ struct batadv_bcast_duplist_entry {
 #endif
 
 enum batadv_counters {
+       BATADV_CNT_TX,
+       BATADV_CNT_TX_BYTES,
+       BATADV_CNT_TX_DROPPED,
+       BATADV_CNT_RX,
+       BATADV_CNT_RX_BYTES,
        BATADV_CNT_FORWARD,
        BATADV_CNT_FORWARD_BYTES,
        BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@ enum batadv_counters {
        BATADV_CNT_NUM,
 };
 
+/**
+ * struct batadv_priv_tt - per mesh interface translation table data
+ * @vn: translation table version number
+ * @ogm_append_cnt: counter of how many more OGMs may carry the local tt diff
+ * @local_changes: changes registered in an originator interval
+ * @poss_change: Detect an ongoing roaming phase. If true, then this node
+ *  received a roaming_adv and has to inspect every packet directed to it to
+ *  check whether it still is the true destination or not. This flag will be
+ *  reset to false as soon as this node's ttvn is increased
+ * @changes_list: tracks tt local changes within an originator interval
+ * @local_hash: local translation table hash table
+ * @global_hash: global translation table hash table
+ * @req_list: list of pending tt_requests
+ * @roam_list: list of recent roaming events to rate limit roaming clients
+ * @changes_list_lock: lock protecting changes_list
+ * @req_list_lock: lock protecting req_list
+ * @roam_list_lock: lock protecting roam_list
+ * @local_entry_num: number of entries in the local translation table
+ * @local_crc: Checksum of the local table, recomputed before sending a new OGM
+ * @last_changeset: last tt changeset this host has generated
+ * @last_changeset_len: length of last_changeset
+ * @last_changeset_lock: lock protecting last_changeset and last_changeset_len
+ * @work: delayed work for periodic translation table purging
+ */
+struct batadv_priv_tt {
+       atomic_t vn;
+       atomic_t ogm_append_cnt;
+       atomic_t local_changes;
+       bool poss_change;
+       struct list_head changes_list;
+       struct batadv_hashtable *local_hash;
+       struct batadv_hashtable *global_hash;
+       struct list_head req_list;
+       struct list_head roam_list;
+       spinlock_t changes_list_lock; /* protects changes */
+       spinlock_t req_list_lock; /* protects req_list */
+       spinlock_t roam_list_lock; /* protects roam_list */
+       atomic_t local_entry_num;
+       uint16_t local_crc;
+       unsigned char *last_changeset;
+       int16_t last_changeset_len;
+       spinlock_t last_changeset_lock; /* protects last_changeset */
+       struct delayed_work work;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct batadv_priv_bla {
+       atomic_t num_requests; /* number of bla requests in flight */
+       struct batadv_hashtable *claim_hash;
+       struct batadv_hashtable *backbone_hash;
+       struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+       int bcast_duplist_curr;
+       struct batadv_bla_claim_dst claim_dest;
+       struct delayed_work work;
+};
+#endif
+
+struct batadv_priv_gw {
+       struct hlist_head list;
+       spinlock_t list_lock; /* protects gw_list and curr_gw */
+       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+       atomic_t reselect;
+};
+
+struct batadv_priv_vis {
+       struct list_head send_list;
+       struct batadv_hashtable *hash;
+       spinlock_t hash_lock; /* protects hash */
+       spinlock_t list_lock; /* protects info::recv_list */
+       struct delayed_work work;
+       struct batadv_vis_info *my_info;
+};
+
 struct batadv_priv {
        atomic_t mesh_state;
        struct net_device_stats stats;
@@ -179,64 +245,24 @@ struct batadv_priv {
        atomic_t bcast_seqno;
        atomic_t bcast_queue_left;
        atomic_t batman_queue_left;
-       atomic_t ttvn; /* translation table version number */
-       atomic_t tt_ogm_append_cnt;
-       atomic_t tt_local_changes; /* changes registered in a OGM interval */
-       atomic_t bla_num_requests; /* number of bla requests in flight */
-       /* The tt_poss_change flag is used to detect an ongoing roaming phase.
-        * If true, then I received a Roaming_adv and I have to inspect every
-        * packet directed to me to check whether I am still the true
-        * destination or not. This flag will be reset to false as soon as I
-        * increase my TTVN
-        */
-       bool tt_poss_change;
        char num_ifaces;
        struct batadv_debug_log *debug_log;
        struct kobject *mesh_obj;
        struct dentry *debug_dir;
        struct hlist_head forw_bat_list;
        struct hlist_head forw_bcast_list;
-       struct hlist_head gw_list;
-       struct list_head tt_changes_list; /* tracks changes in a OGM int */
-       struct list_head vis_send_list;
        struct batadv_hashtable *orig_hash;
-       struct batadv_hashtable *tt_local_hash;
-       struct batadv_hashtable *tt_global_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-       struct batadv_hashtable *claim_hash;
-       struct batadv_hashtable *backbone_hash;
-#endif
-       struct list_head tt_req_list; /* list of pending tt_requests */
-       struct list_head tt_roam_list;
-       struct batadv_hashtable *vis_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-       struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
-       int bcast_duplist_curr;
-       struct batadv_bla_claim_dst claim_dest;
-#endif
        spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
        spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
-       spinlock_t tt_changes_list_lock; /* protects tt_changes */
-       spinlock_t tt_req_list_lock; /* protects tt_req_list */
-       spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
-       spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
-       spinlock_t vis_hash_lock; /* protects vis_hash */
-       spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-       atomic_t num_local_tt;
-       /* Checksum of the local table, recomputed before sending a new OGM */
-       uint16_t tt_crc;
-       unsigned char *tt_buff;
-       int16_t tt_buff_len;
-       spinlock_t tt_buff_lock; /* protects tt_buff */
-       struct delayed_work tt_work;
        struct delayed_work orig_work;
-       struct delayed_work vis_work;
-       struct delayed_work bla_work;
-       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
-       atomic_t gw_reselect;
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
-       struct batadv_vis_info *my_vis_info;
        struct batadv_algo_ops *bat_algo_ops;
+#ifdef CONFIG_BATMAN_ADV_BLA
+       struct batadv_priv_bla bla;
+#endif
+       struct batadv_priv_gw gw;
+       struct batadv_priv_tt tt;
+       struct batadv_priv_vis vis;
 };
 
 struct batadv_socket_client {
@@ -258,6 +284,7 @@ struct batadv_tt_common_entry {
        uint8_t addr[ETH_ALEN];
        struct hlist_node hash_entry;
        uint16_t flags;
+       unsigned long added_at;
        atomic_t refcount;
        struct rcu_head rcu;
 };
@@ -277,6 +304,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
        struct batadv_orig_node *orig_node;
        uint8_t ttvn;
+       atomic_t refcount;
        struct rcu_head rcu;
        struct hlist_node list;
 };
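
Grouping the TT, BLA, gateway and vis state into embedded sub-structures is what lets the reworked workers in this series recover their batadv_priv with two container_of() steps instead of a dedicated back-pointer. A sketch of that pattern, mirroring batadv_tt_purge() above (only the function name is made up):

static void example_tt_worker(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_priv_tt *priv_tt;
        struct batadv_priv *bat_priv;

        /* work is embedded in a delayed_work, which is embedded in the
         * tt sub-structure, which in turn is embedded in batadv_priv
         */
        delayed_work = container_of(work, struct delayed_work, work);
        priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
        bat_priv = container_of(priv_tt, struct batadv_priv, tt);

        /* ... purge or refresh TT state hanging off bat_priv here ... */
}
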
index 0016464..f397232 100644 (file)
@@ -39,6 +39,7 @@ batadv_frag_merge_packet(struct list_head *head,
        struct batadv_unicast_packet *unicast_packet;
        int hdr_len = sizeof(*unicast_packet);
        int uni_diff = sizeof(*up) - hdr_len;
+       uint8_t *packet_pos;
 
        up = (struct batadv_unicast_frag_packet *)skb->data;
        /* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@ batadv_frag_merge_packet(struct list_head *head,
        kfree_skb(tmp_skb);
 
        memmove(skb->data + uni_diff, skb->data, hdr_len);
-       unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
-                                                                 uni_diff);
+       packet_pos = skb_pull(skb, uni_diff);
+       unicast_packet = (struct batadv_unicast_packet *)packet_pos;
        unicast_packet->header.packet_type = BATADV_UNICAST;
 
        return skb;
@@ -121,6 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
 {
        struct batadv_frag_packet_list_entry *tfp;
        struct batadv_unicast_frag_packet *tmp_up = NULL;
+       int is_head_tmp, is_head;
        uint16_t search_seqno;
 
        if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@ batadv_frag_search_packet(struct list_head *head,
        else
                search_seqno = ntohs(up->seqno)-1;
 
+       is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
+
        list_for_each_entry(tfp, head, list) {
 
                if (!tfp->skb)
@@ -139,9 +143,8 @@ batadv_frag_search_packet(struct list_head *head,
                tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
 
                if (tfp->seqno == search_seqno) {
-
-                       if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
-                           (up->flags & BATADV_UNI_FRAG_HEAD))
+                       is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
+                       if (is_head_tmp != is_head)
                                return tfp;
                        else
                                goto mov_tail;
@@ -334,8 +337,7 @@ find_router:
        /* copy the destination for faster routing */
        memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
        /* set the destination tt version number */
-       unicast_packet->ttvn =
-               (uint8_t)atomic_read(&orig_node->last_ttvn);
+       unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 
        /* inform the destination node that we are still missing a correct route
         * for this client. The destination will receive this packet and will
index 2a2ea06..5abd145 100644 (file)
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref)
        bat_priv = info->bat_priv;
 
        list_del_init(&info->send_list);
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
 
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
        kfree_skb(info->skb_packet);
        kfree(info);
 }
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
 static struct batadv_vis_info *
 batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
        struct hlist_head *head;
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        uint32_t i;
        int ret = 0;
        int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
        if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
                goto out;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                batadv_vis_seq_print_text_bucket(seq, head);
        }
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 
 out:
        if (primary_if)
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv,
 {
        if (list_empty(&info->send_list)) {
                kref_get(&info->refcount);
-               list_add_tail(&info->send_list, &bat_priv->vis_send_list);
+               list_add_tail(&info->send_list, &bat_priv->vis.send_list);
        }
 }
 
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv,
                return;
 
        memcpy(entry->mac, mac, ETH_ALEN);
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_add_tail(&entry->list, recv_list);
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
 }
 
 /* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
 {
        const struct batadv_recvlist_node *entry;
 
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_for_each_entry(entry, recv_list, list) {
                if (batadv_compare_eth(entry->mac, mac)) {
-                       spin_unlock_bh(&bat_priv->vis_list_lock);
+                       spin_unlock_bh(&bat_priv->vis.list_lock);
                        return 1;
                }
        }
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
        return 0;
 }
 
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
 
        *is_new = 0;
        /* sanity check */
-       if (!bat_priv->vis_hash)
+       if (!bat_priv->vis.hash)
                return NULL;
 
        /* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
                        }
                }
                /* remove old entry */
-               batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+               batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
                                   batadv_vis_info_choose, old_info);
                batadv_send_list_del(old_info);
                kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
        batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
 
        /* try to add it */
-       hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
                                     batadv_vis_info_choose, info,
                                     &info->hash_entry);
        if (hash_added != 0) {
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
 
        make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
                                 &is_new, make_broadcast);
        if (!info)
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
        if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
                batadv_send_list_add(bat_priv, info);
 end:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
            batadv_is_my_mac(vis_packet->target_orig))
                are_target = 1;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
                                 &is_new, are_target);
 
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
        }
 
 end:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *router;
-       struct batadv_vis_info *info = bat_priv->my_vis_info;
+       struct batadv_vis_info *info = bat_priv->vis.my_info;
        struct batadv_vis_packet *packet;
        struct batadv_vis_info_entry *entry;
        struct batadv_tt_common_entry *tt_common_entry;
+       uint8_t *packet_pos;
        int best_tq = -1;
        uint32_t i;
 
@@ -618,8 +619,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
                                goto next;
 
                        /* fill one entry into buffer. */
-                       entry = (struct batadv_vis_info_entry *)
-                                     skb_put(info->skb_packet, sizeof(*entry));
+                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+                       entry = (struct batadv_vis_info_entry *)packet_pos;
                        memcpy(entry->src,
                               router->if_incoming->net_dev->dev_addr,
                               ETH_ALEN);
@@ -636,7 +637,7 @@ next:
                rcu_read_unlock();
        }
 
-       hash = bat_priv->tt_local_hash;
+       hash = bat_priv->tt.local_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -644,9 +645,8 @@ next:
                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node, head,
                                         hash_entry) {
-                       entry = (struct batadv_vis_info_entry *)
-                                       skb_put(info->skb_packet,
-                                               sizeof(*entry));
+                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+                       entry = (struct batadv_vis_info_entry *)packet_pos;
                        memset(entry->src, 0, ETH_ALEN);
                        memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@ unlock:
 static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 {
        uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        struct batadv_vis_info *info;
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
                hlist_for_each_entry_safe(info, node, node_tmp,
                                          head, hash_entry) {
                        /* never purge own data. */
-                       if (info == bat_priv->my_vis_info)
+                       if (info == bat_priv->vis.my_info)
                                continue;
 
                        if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@ out:
 /* called from timer; send (and maybe generate) vis packet. */
 static void batadv_send_vis_packets(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
+       struct batadv_priv_vis *priv_vis;
        struct batadv_vis_info *info;
 
-       bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       delayed_work = container_of(work, struct delayed_work, work);
+       priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
+       bat_priv = container_of(priv_vis, struct batadv_priv, vis);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        batadv_purge_vis_packets(bat_priv);
 
        if (batadv_generate_vis_packet(bat_priv) == 0) {
                /* schedule if generation was successful */
-               batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
+               batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
        }
 
-       while (!list_empty(&bat_priv->vis_send_list)) {
-               info = list_first_entry(&bat_priv->vis_send_list,
+       while (!list_empty(&bat_priv->vis.send_list)) {
+               info = list_first_entry(&bat_priv->vis.send_list,
                                        typeof(*info), send_list);
 
                kref_get(&info->refcount);
-               spin_unlock_bh(&bat_priv->vis_hash_lock);
+               spin_unlock_bh(&bat_priv->vis.hash_lock);
 
                batadv_send_vis_packet(bat_priv, info);
 
-               spin_lock_bh(&bat_priv->vis_hash_lock);
+               spin_lock_bh(&bat_priv->vis.hash_lock);
                batadv_send_list_del(info);
                kref_put(&info->refcount, batadv_free_info);
        }
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_start_vis_timer(bat_priv);
 }
 
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        unsigned long first_seen;
        struct sk_buff *tmp_skb;
 
-       if (bat_priv->vis_hash)
+       if (bat_priv->vis.hash)
                return 0;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
 
-       bat_priv->vis_hash = batadv_hash_new(256);
-       if (!bat_priv->vis_hash) {
+       bat_priv->vis.hash = batadv_hash_new(256);
+       if (!bat_priv->vis.hash) {
                pr_err("Can't initialize vis_hash\n");
                goto err;
        }
 
-       bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-       if (!bat_priv->my_vis_info)
+       bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+       if (!bat_priv->vis.my_info)
                goto err;
 
        len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-       bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
-       if (!bat_priv->my_vis_info->skb_packet)
+       bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
+       if (!bat_priv->vis.my_info->skb_packet)
                goto free_info;
 
-       skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
-       tmp_skb = bat_priv->my_vis_info->skb_packet;
+       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
+       tmp_skb = bat_priv->vis.my_info->skb_packet;
        packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
        /* prefill the vis info */
        first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-       bat_priv->my_vis_info->first_seen = first_seen;
-       INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
-       INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
-       kref_init(&bat_priv->my_vis_info->refcount);
-       bat_priv->my_vis_info->bat_priv = bat_priv;
+       bat_priv->vis.my_info->first_seen = first_seen;
+       INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
+       INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
+       kref_init(&bat_priv->vis.my_info->refcount);
+       bat_priv->vis.my_info->bat_priv = bat_priv;
        packet->header.version = BATADV_COMPAT_VERSION;
        packet->header.packet_type = BATADV_VIS;
        packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        packet->reserved = 0;
        packet->entries = 0;
 
-       INIT_LIST_HEAD(&bat_priv->vis_send_list);
+       INIT_LIST_HEAD(&bat_priv->vis.send_list);
 
-       hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
                                     batadv_vis_info_choose,
-                                    bat_priv->my_vis_info,
-                                    &bat_priv->my_vis_info->hash_entry);
+                                    bat_priv->vis.my_info,
+                                    &bat_priv->vis.my_info->hash_entry);
        if (hash_added != 0) {
                pr_err("Can't add own vis packet into hash\n");
                /* not in hash, need to remove it manually. */
-               kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
+               kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
                goto err;
        }
 
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_start_vis_timer(bat_priv);
        return 0;
 
 free_info:
-       kfree(bat_priv->my_vis_info);
-       bat_priv->my_vis_info = NULL;
+       kfree(bat_priv->vis.my_info);
+       bat_priv->vis.my_info = NULL;
 err:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_vis_quit(bat_priv);
        return -ENOMEM;
 }
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg)
 /* shutdown vis-server */
 void batadv_vis_quit(struct batadv_priv *bat_priv)
 {
-       if (!bat_priv->vis_hash)
+       if (!bat_priv->vis.hash)
                return;
 
-       cancel_delayed_work_sync(&bat_priv->vis_work);
+       cancel_delayed_work_sync(&bat_priv->vis.work);
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        /* properly remove, kill timers ... */
-       batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
-       bat_priv->vis_hash = NULL;
-       bat_priv->my_vis_info = NULL;
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
+       bat_priv->vis.hash = NULL;
+       bat_priv->vis.my_info = NULL;
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* schedule packets for (re)transmission */
 static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+       INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
                           msecs_to_jiffies(BATADV_VIS_INTERVAL));
 }
index 84e716e..873282f 100644 (file)
@@ -20,7 +20,7 @@
 #ifndef _NET_BATMAN_ADV_VIS_H_
 #define _NET_BATMAN_ADV_VIS_H_
 
-/* timeout of vis packets in miliseconds */
+/* timeout of vis packets in milliseconds */
 #define BATADV_VIS_TIMEOUT             200000
 
 int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
index 4ff0bf3..0760d1f 100644 (file)
@@ -316,7 +316,7 @@ send_rsp:
 static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
                               struct a2mp_cmd *hdr)
 {
-       BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+       BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
 
        skb_pull(skb, le16_to_cpu(hdr->len));
        return 0;
@@ -325,17 +325,19 @@ static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
 /* Handle A2MP signalling */
 static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       struct a2mp_cmd *hdr = (void *) skb->data;
+       struct a2mp_cmd *hdr;
        struct amp_mgr *mgr = chan->data;
        int err = 0;
 
        amp_mgr_get(mgr);
 
        while (skb->len >= sizeof(*hdr)) {
-               struct a2mp_cmd *hdr = (void *) skb->data;
-               u16 len = le16_to_cpu(hdr->len);
+               u16 len;
 
-               BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+               hdr = (void *) skb->data;
+               len = le16_to_cpu(hdr->len);
+
+               BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
 
                skb_pull(skb, sizeof(*hdr));
 
@@ -393,7 +395,9 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 
        if (err) {
                struct a2mp_cmd_rej rej;
+
                rej.reason = __constant_cpu_to_le16(0);
+               hdr = (void *) skb->data;
 
                BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
 
@@ -412,7 +416,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 
 static void a2mp_chan_close_cb(struct l2cap_chan *chan)
 {
-       l2cap_chan_destroy(chan);
+       l2cap_chan_put(chan);
 }
 
 static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
index f7db579..58f9762 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/ioctls.h>
 
 #include <net/bluetooth/bluetooth.h>
+#include <linux/proc_fs.h>
 
 #define VERSION "2.16"
 
@@ -532,6 +533,146 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
 
+#ifdef CONFIG_PROC_FS
+struct bt_seq_state {
+       struct bt_sock_list *l;
+};
+
+static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(seq->private->l->lock)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       read_lock(&l->lock);
+       return seq_hlist_start_head(&l->head, *pos);
+}
+
+static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       return seq_hlist_next(v, &l->head, pos);
+}
+
+static void bt_seq_stop(struct seq_file *seq, void *v)
+       __releases(seq->private->l->lock)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       read_unlock(&l->lock);
+}
+
+static int bt_seq_show(struct seq_file *seq, void *v)
+{
+       struct sock *sk;
+       struct bt_sock *bt;
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+       bdaddr_t src_baswapped, dst_baswapped;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
+
+               if (l->custom_seq_show) {
+                       seq_putc(seq, ' ');
+                       l->custom_seq_show(seq, v);
+               }
+
+               seq_putc(seq, '\n');
+       } else {
+               sk = sk_entry(v);
+               bt = bt_sk(sk);
+               baswap(&src_baswapped, &bt->src);
+               baswap(&dst_baswapped, &bt->dst);
+
+               seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu",
+                          sk,
+                          atomic_read(&sk->sk_refcnt),
+                          sk_rmem_alloc_get(sk),
+                          sk_wmem_alloc_get(sk),
+                          sock_i_uid(sk),
+                          sock_i_ino(sk),
+                          &src_baswapped,
+                          &dst_baswapped,
+                          bt->parent ? sock_i_ino(bt->parent) : 0LU);
+
+               if (l->custom_seq_show) {
+                       seq_putc(seq, ' ');
+                       l->custom_seq_show(seq, v);
+               }
+
+               seq_putc(seq, '\n');
+       }
+       return 0;
+}
+
+static const struct seq_operations bt_seq_ops = {
+       .start = bt_seq_start,
+       .next  = bt_seq_next,
+       .stop  = bt_seq_stop,
+       .show  = bt_seq_show,
+};
+
+static int bt_seq_open(struct inode *inode, struct file *file)
+{
+       struct bt_sock_list *sk_list;
+       struct bt_seq_state *s;
+
+       sk_list = PDE(inode)->data;
+       s = __seq_open_private(file, &bt_seq_ops,
+                              sizeof(struct bt_seq_state));
+       if (s == NULL)
+               return -ENOMEM;
+
+       s->l = sk_list;
+       return 0;
+}
+
+int bt_procfs_init(struct module *module, struct net *net, const char *name,
+                  struct bt_sock_list *sk_list,
+                  int (*seq_show)(struct seq_file *, void *))
+{
+       struct proc_dir_entry *pde;
+
+       sk_list->custom_seq_show = seq_show;
+
+       sk_list->fops.owner     = module;
+       sk_list->fops.open      = bt_seq_open;
+       sk_list->fops.read      = seq_read;
+       sk_list->fops.llseek    = seq_lseek;
+       sk_list->fops.release   = seq_release_private;
+
+       pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
+       if (pde == NULL)
+               return -ENOMEM;
+
+       pde->data = sk_list;
+
+       return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+       proc_net_remove(net, name);
+}
+#else
+int bt_procfs_init(struct module *module, struct net *net, const char *name,
+                  struct bt_sock_list *sk_list,
+                  int (*seq_show)(struct seq_file *, void *))
+{
+       return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+}
+#endif
+EXPORT_SYMBOL(bt_procfs_init);
+EXPORT_SYMBOL(bt_procfs_cleanup);
+
 static struct net_proto_family bt_sock_family_ops = {
        .owner  = THIS_MODULE,
        .family = PF_BLUETOOTH,
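
bt_procfs_init() above takes an optional seq_show callback so a protocol can append its own columns to the generated proc entry; every caller in this series passes NULL. A hedged sketch of such a callback follows; the extra "State" column, the example_sk_list name and the registration call are made-up illustrations, not part of this patch.

/* bt_seq_show() also invokes the callback for the header row, so
 * SEQ_START_TOKEN has to be handled explicitly
 */
static int example_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "State");
                return 0;
        }

        seq_printf(seq, "%-5d", sk_entry(v)->sk_state);
        return 0;
}

/* registered from the protocol's init path, for example:
 *      bt_procfs_init(THIS_MODULE, &init_net, "example",
 *                     &example_sk_list, example_seq_show);
 */
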
index 5e5f5b4..5b6cc0b 100644 (file)
 
 #include "bnep.h"
 
+static struct bt_sock_list bnep_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
+};
+
 static int bnep_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -38,6 +42,8 @@ static int bnep_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&bnep_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
        return 0;
@@ -204,6 +210,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&bnep_sk_list, sk);
        return 0;
 }
 
@@ -222,19 +229,30 @@ int __init bnep_sock_init(void)
                return err;
 
        err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register BNEP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create BNEP proc file");
+               bt_sock_unregister(BTPROTO_BNEP);
+               goto error;
+       }
+
+       BT_INFO("BNEP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("Can't register BNEP socket");
        proto_unregister(&bnep_proto);
        return err;
 }
 
 void __exit bnep_sock_cleanup(void)
 {
+       bt_procfs_cleanup(&init_net, "bnep");
        if (bt_sock_unregister(BTPROTO_BNEP) < 0)
                BT_ERR("Can't unregister BNEP socket");
 
index 311668d..d5cacef 100644 (file)
 
 #include "cmtp.h"
 
+static struct bt_sock_list cmtp_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
+};
+
 static int cmtp_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -51,6 +55,8 @@ static int cmtp_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&cmtp_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -214,6 +220,8 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&cmtp_sk_list, sk);
+
        return 0;
 }
 
@@ -232,19 +240,30 @@ int cmtp_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register CMTP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create CMTP proc file");
+               bt_sock_unregister(BTPROTO_CMTP);
+               goto error;
+       }
+
+       BT_INFO("CMTP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("Can't register CMTP socket");
        proto_unregister(&cmtp_proto);
        return err;
 }
 
 void cmtp_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "cmtp");
        if (bt_sock_unregister(BTPROTO_CMTP) < 0)
                BT_ERR("Can't unregister CMTP socket");
 
index d4de5db..fa974a1 100644 (file)
@@ -696,7 +696,8 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
-               if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+                   mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
@@ -797,7 +798,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
-       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+           mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
index 715d7e3..4fd2cf3 100644 (file)
@@ -513,7 +513,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
        if (hdev->features[3] & LMP_RSSI_INQ)
                events[4] |= 0x02; /* Inquiry Result with RSSI */
 
-       if (hdev->features[5] & LMP_SNIFF_SUBR)
+       if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */
 
        if (hdev->features[5] & LMP_PAUSE_ENC)
@@ -522,13 +522,13 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
        if (hdev->features[6] & LMP_EXT_INQ)
                events[5] |= 0x40; /* Extended Inquiry Result */
 
-       if (hdev->features[6] & LMP_NO_FLUSH)
+       if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */
 
        if (hdev->features[7] & LMP_LSTO)
                events[6] |= 0x80; /* Link Supervision Timeout Changed */
 
-       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+       if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
@@ -541,7 +541,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
                                         * Features Notification */
        }
 
-       if (hdev->features[4] & LMP_LE)
+       if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */
 
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
@@ -623,11 +623,11 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;
 
-       if (hdev->features[0] & LMP_RSWITCH)
+       if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (hdev->features[0] & LMP_HOLD)
                link_policy |= HCI_LP_HOLD;
-       if (hdev->features[0] & LMP_SNIFF)
+       if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (hdev->features[1] & LMP_PARK)
                link_policy |= HCI_LP_PARK;
@@ -686,7 +686,7 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
                hdev->esco_type |= (ESCO_HV3);
        }
 
-       if (hdev->features[3] & LMP_ESCO)
+       if (lmp_esco_capable(hdev))
                hdev->esco_type |= (ESCO_EV3);
 
        if (hdev->features[4] & LMP_EV4)
@@ -746,7 +746,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
                break;
        }
 
-       if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
+       if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
                hci_set_le_support(hdev);
 
 done:
@@ -1625,43 +1625,30 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
 
 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 {
-       struct hci_cp_le_create_conn *cp;
        struct hci_conn *conn;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
-       if (!cp)
-               return;
+       if (status) {
+               hci_dev_lock(hdev);
 
-       hci_dev_lock(hdev);
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (!conn) {
+                       hci_dev_unlock(hdev);
+                       return;
+               }
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+               BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
+                      conn);
 
-       BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
-              conn);
+               conn->state = BT_CLOSED;
+               mgmt_connect_failed(hdev, &conn->dst, conn->type,
+                                   conn->dst_type, status);
+               hci_proto_connect_cfm(conn, status);
+               hci_conn_del(conn);
 
-       if (status) {
-               if (conn && conn->state == BT_CONNECT) {
-                       conn->state = BT_CLOSED;
-                       mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
-                                           conn->dst_type, status);
-                       hci_proto_connect_cfm(conn, status);
-                       hci_conn_del(conn);
-               }
-       } else {
-               if (!conn) {
-                       conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-                       if (conn) {
-                               conn->dst_type = cp->peer_addr_type;
-                               conn->out = true;
-                       } else {
-                               BT_ERR("No memory for new connection");
-                       }
-               }
+               hci_dev_unlock(hdev);
        }
-
-       hci_dev_unlock(hdev);
 }
 
 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
@@ -3268,12 +3255,8 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
 
        BT_DBG("%s", hdev->name);
 
-       hci_dev_lock(hdev);
-
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
-
-       hci_dev_unlock(hdev);
 }
 
 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
@@ -3366,11 +3349,23 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (ev->status) {
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (!conn)
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (!conn) {
+               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+               if (!conn) {
+                       BT_ERR("No memory for new connection");
                        goto unlock;
+               }
 
+               conn->dst_type = ev->bdaddr_type;
+
+               if (ev->role == LE_CONN_ROLE_MASTER) {
+                       conn->out = true;
+                       conn->link_mode |= HCI_LM_MASTER;
+               }
+       }
+
+       if (ev->status) {
                mgmt_connect_failed(hdev, &conn->dst, conn->type,
                                    conn->dst_type, ev->status);
                hci_proto_connect_cfm(conn, ev->status);
@@ -3379,18 +3374,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                goto unlock;
        }
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
-       if (!conn) {
-               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
-               if (!conn) {
-                       BT_ERR("No memory for new connection");
-                       hci_dev_unlock(hdev);
-                       return;
-               }
-
-               conn->dst_type = ev->bdaddr_type;
-       }
-
        if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
                                      conn->dst_type, 0, NULL, 0, NULL);
index 19fdac7..bb64331 100644 (file)
@@ -1102,21 +1102,30 @@ int __init hci_sock_init(void)
                return err;
 
        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("HCI socket registration failed");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create HCI proc file");
+               bt_sock_unregister(BTPROTO_HCI);
+               goto error;
+       }
 
        BT_INFO("HCI socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("HCI socket registration failed");
        proto_unregister(&hci_sk_proto);
        return err;
 }
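
Note: the init path above now sets up three things in order -- the protocol, the Bluetooth socket family, and the new proc entry -- and the error paths unwind them in reverse. A minimal sketch of that ordering, using hypothetical stand-ins (example_proto, example_family_ops, example_sk_list) for the objects not shown in this hunk:

    /* Sketch only: register/unwind ordering implied by the hunk above.
     * example_proto, example_family_ops and example_sk_list are
     * hypothetical stand-ins, not names from this patch. */
    static int __init example_sock_init(void)
    {
            int err;

            err = proto_register(&example_proto, 0);
            if (err < 0)
                    return err;

            err = bt_sock_register(BTPROTO_HCI, &example_family_ops);
            if (err < 0)
                    goto unreg_proto;

            err = bt_procfs_init(THIS_MODULE, &init_net, "example",
                                 &example_sk_list, NULL);
            if (err < 0)
                    goto unreg_sock;

            return 0;

    unreg_sock:
            bt_sock_unregister(BTPROTO_HCI);
    unreg_proto:
            proto_unregister(&example_proto);
            return err;
    }

The same register-then-procfs pattern, with the same unwind, repeats below for HIDP, L2CAP, RFCOMM and SCO.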
 
 void hci_sock_cleanup(void)
 {
+       bt_procfs_cleanup(&init_net, "hci");
        if (bt_sock_unregister(BTPROTO_HCI) < 0)
                BT_ERR("HCI socket unregistration failed");
 
index 18b3f68..eca3889 100644 (file)
 
 #include "hidp.h"
 
+static struct bt_sock_list hidp_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
+};
+
 static int hidp_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -34,6 +38,8 @@ static int hidp_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&hidp_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -253,6 +259,8 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&hidp_sk_list, sk);
+
        return 0;
 }
 
@@ -271,8 +279,19 @@ int __init hidp_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register HIDP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create HIDP proc file");
+               bt_sock_unregister(BTPROTO_HIDP);
+               goto error;
+       }
+
+       BT_INFO("HIDP socket layer initialized");
 
        return 0;
 
@@ -284,6 +303,7 @@ error:
 
 void __exit hidp_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "hidp");
        if (bt_sock_unregister(BTPROTO_HIDP) < 0)
                BT_ERR("Can't unregister HIDP socket");
 
index daa149b..f0a3ab1 100644 (file)
@@ -416,13 +416,30 @@ struct l2cap_chan *l2cap_chan_create(void)
        return chan;
 }
 
-void l2cap_chan_destroy(struct l2cap_chan *chan)
+static void l2cap_chan_destroy(struct l2cap_chan *chan)
 {
+       BT_DBG("chan %p", chan);
+
        write_lock(&chan_list_lock);
        list_del(&chan->global_l);
        write_unlock(&chan_list_lock);
 
-       l2cap_chan_put(chan);
+       kfree(chan);
+}
+
+void l2cap_chan_hold(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
+
+       atomic_inc(&c->refcnt);
+}
+
+void l2cap_chan_put(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
+
+       if (atomic_dec_and_test(&c->refcnt))
+               l2cap_chan_destroy(c);
 }
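
Note: l2cap_chan_destroy() is now static and only reachable through the refcount, so callers pin a channel with the hold/put pair added above. An illustrative (hypothetical) caller:

    /* Illustrative only: take a reference so the channel cannot be freed
     * while we work on it; the final put frees it via l2cap_chan_destroy(). */
    static void example_use_chan(struct l2cap_chan *chan)
    {
            l2cap_chan_hold(chan);

            l2cap_chan_lock(chan);
            /* ... operate on the channel ... */
            l2cap_chan_unlock(chan);

            l2cap_chan_put(chan);   /* may free chan if this was the last reference */
    }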
 
 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
@@ -5330,7 +5347,7 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return exact ? lm1 : lm2;
 }
 
-int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 {
        struct l2cap_conn *conn;
 
@@ -5343,7 +5360,6 @@ int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
        } else
                l2cap_conn_del(hcon, bt_to_errno(status));
 
-       return 0;
 }
 
 int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5357,12 +5373,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
        return conn->disc_reason;
 }
 
-int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        l2cap_conn_del(hcon, bt_to_errno(reason));
-       return 0;
 }
 
 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
@@ -5405,6 +5420,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
                       state_to_string(chan->state));
 
+               if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+                       l2cap_chan_unlock(chan);
+                       continue;
+               }
+
                if (chan->scid == L2CAP_CID_LE_DATA) {
                        if (!status && encrypt) {
                                chan->sec_level = hcon->sec_level;
index 1497edd..3a6ce73 100644 (file)
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
+static struct bt_sock_list l2cap_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
@@ -824,7 +828,7 @@ static void l2cap_sock_kill(struct sock *sk)
 
        /* Kill poor orphan */
 
-       l2cap_chan_destroy(l2cap_pi(sk)->chan);
+       l2cap_chan_put(l2cap_pi(sk)->chan);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
 }
@@ -887,6 +891,8 @@ static int l2cap_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&l2cap_sk_list, sk);
+
        err = l2cap_sock_shutdown(sock, 2);
 
        sock_orphan(sk);
@@ -1211,6 +1217,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
                return -ENOMEM;
 
        l2cap_sock_init(sk, NULL);
+       bt_sock_link(&l2cap_sk_list, sk);
        return 0;
 }
 
@@ -1249,21 +1256,30 @@ int __init l2cap_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("L2CAP socket registration failed");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create L2CAP proc file");
+               bt_sock_unregister(BTPROTO_L2CAP);
+               goto error;
+       }
 
        BT_INFO("L2CAP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("L2CAP socket registration failed");
        proto_unregister(&l2cap_proto);
        return err;
 }
 
 void l2cap_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "l2cap");
        if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
                BT_ERR("L2CAP socket unregistration failed");
 
index ad6613d..a3329cb 100644 (file)
@@ -193,6 +193,11 @@ static u8 mgmt_status_table[] = {
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
 };
 
+bool mgmt_valid_hdev(struct hci_dev *hdev)
+{
+       return hdev->dev_type == HCI_BREDR;
+}
+
 static u8 mgmt_status(u8 hci_status)
 {
        if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -317,7 +322,6 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 data_len)
 {
        struct mgmt_rp_read_index_list *rp;
-       struct list_head *p;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
@@ -328,7 +332,10 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
        read_lock(&hci_dev_list_lock);
 
        count = 0;
-       list_for_each(p, &hci_dev_list) {
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (!mgmt_valid_hdev(d))
+                       continue;
+
                count++;
        }
 
@@ -346,6 +353,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                if (test_bit(HCI_SETUP, &d->dev_flags))
                        continue;
 
+               if (!mgmt_valid_hdev(d))
+                       continue;
+
                rp->index[i++] = cpu_to_le16(d->id);
                BT_DBG("Added hci%u", d->id);
        }
@@ -370,10 +380,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        settings |= MGMT_SETTING_DISCOVERABLE;
        settings |= MGMT_SETTING_PAIRABLE;
 
-       if (hdev->features[6] & LMP_SIMPLE_PAIR)
+       if (lmp_ssp_capable(hdev))
                settings |= MGMT_SETTING_SSP;
 
-       if (!(hdev->features[4] & LMP_NO_BREDR)) {
+       if (lmp_bredr_capable(hdev)) {
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;
        }
@@ -381,7 +391,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        if (enable_hs)
                settings |= MGMT_SETTING_HS;
 
-       if (hdev->features[4] & LMP_LE)
+       if (lmp_le_capable(hdev))
                settings |= MGMT_SETTING_LE;
 
        return settings;
@@ -403,7 +413,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
                settings |= MGMT_SETTING_PAIRABLE;
 
-       if (!(hdev->features[4] & LMP_NO_BREDR))
+       if (lmp_bredr_capable(hdev))
                settings |= MGMT_SETTING_BREDR;
 
        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -1111,7 +1121,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+       if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto failed;
@@ -1195,7 +1205,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       if (!(hdev->features[4] & LMP_LE)) {
+       if (!lmp_le_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
@@ -2191,7 +2201,7 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+       if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
@@ -2820,6 +2830,9 @@ static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
 
 int mgmt_index_added(struct hci_dev *hdev)
 {
+       if (!mgmt_valid_hdev(hdev))
+               return -ENOTSUPP;
+
        return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
@@ -2827,6 +2840,9 @@ int mgmt_index_removed(struct hci_dev *hdev)
 {
        u8 status = MGMT_STATUS_INVALID_INDEX;
 
+       if (!mgmt_valid_hdev(hdev))
+               return -ENOTSUPP;
+
        mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
        return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
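
Note: the open-coded feature-bit tests are replaced by lmp_*_capable() helpers. Their definitions are not part of this diff, but judging from the expressions they replace they are presumably thin wrappers over the same feature bytes:

    /* Assumed definitions, mirroring the bits the old code tested: */
    #define lmp_ssp_capable(dev)    ((dev)->features[6] & LMP_SIMPLE_PAIR)
    #define lmp_bredr_capable(dev)  (!((dev)->features[4] & LMP_NO_BREDR))
    #define lmp_le_capable(dev)     ((dev)->features[4] & LMP_LE)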
index 1a17850..b3226f3 100644 (file)
@@ -1035,8 +1035,17 @@ int __init rfcomm_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("RFCOMM socket layer registration failed");
+               goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create RFCOMM proc file");
+               bt_sock_unregister(BTPROTO_RFCOMM);
                goto error;
+       }
 
        if (bt_debugfs) {
                rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
@@ -1050,13 +1059,14 @@ int __init rfcomm_init_sockets(void)
        return 0;
 
 error:
-       BT_ERR("RFCOMM socket layer registration failed");
        proto_unregister(&rfcomm_proto);
        return err;
 }
 
 void __exit rfcomm_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "rfcomm");
+
        debugfs_remove(rfcomm_sock_debugfs);
 
        if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
index 3589e21..dc42b91 100644 (file)
@@ -912,7 +912,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return lm;
 }
 
-int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
        BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
        if (!status) {
@@ -923,16 +923,13 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
                        sco_conn_ready(conn);
        } else
                sco_conn_del(hcon, bt_to_errno(status));
-
-       return 0;
 }
 
-int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        sco_conn_del(hcon, bt_to_errno(reason));
-       return 0;
 }
 
 int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
@@ -1025,6 +1022,13 @@ int __init sco_init(void)
                goto error;
        }
 
+       err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create SCO proc file");
+               bt_sock_unregister(BTPROTO_SCO);
+               goto error;
+       }
+
        if (bt_debugfs) {
                sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
                                                  NULL, &sco_debugfs_fops);
@@ -1043,6 +1047,8 @@ error:
 
 void __exit sco_exit(void)
 {
+       bt_procfs_cleanup(&init_net, "sco");
+
        debugfs_remove(sco_debugfs);
 
        if (bt_sock_unregister(BTPROTO_SCO) < 0)
index d21f323..9ce430b 100644 (file)
@@ -312,7 +312,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 
                        fe->is_local = f->is_local;
                        if (!f->is_static)
-                               fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
+                               fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
                        ++fe;
                        ++num;
                }
index a6747e6..c3530a8 100644 (file)
@@ -170,5 +170,5 @@ void br_stp_port_timer_init(struct net_bridge_port *p)
 unsigned long br_timer_value(const struct timer_list *timer)
 {
        return timer_pending(timer)
-               ? jiffies_to_clock_t(timer->expires - jiffies) : 0;
+               ? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
 }
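
Note: both bridge hunks switch to jiffies_delta_to_clock_t() for values computed as a difference of jiffies, which can briefly go negative when a timer has just expired. The helper presumably just clamps the delta before converting, along the lines of:

    /* Assumed shape of the helper: report 0 rather than a huge bogus value
     * once the delta has gone negative. */
    static inline clock_t example_jiffies_delta_to_clock_t(long delta)
    {
            return jiffies_to_clock_t(max(0L, delta));
    }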
index 42e6bd0..3c2e9dc 100644 (file)
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
 static int __net_init frame_filter_net_init(struct net *net)
 {
        net->xt.frame_filter = ebt_register_table(net, &frame_filter);
-       if (IS_ERR(net->xt.frame_filter))
-               return PTR_ERR(net->xt.frame_filter);
-       return 0;
+       return PTR_RET(net->xt.frame_filter);
 }
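
Note: PTR_RET() condenses the IS_ERR()/PTR_ERR()/return 0 triple that the removed lines spelled out, so its behaviour is equivalent to:

    /* Open-coded equivalent of the PTR_RET() calls introduced here: */
    static inline int example_ptr_ret(const void *ptr)
    {
            return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
    }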
 
 static void __net_exit frame_filter_net_exit(struct net *net)
index 6dc2f87..10871bc 100644 (file)
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
 static int __net_init frame_nat_net_init(struct net *net)
 {
        net->xt.frame_nat = ebt_register_table(net, &frame_nat);
-       if (IS_ERR(net->xt.frame_nat))
-               return PTR_ERR(net->xt.frame_nat);
-       return 0;
+       return PTR_RET(net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
index 8398836..a5fc3e3 100644 (file)
@@ -1109,11 +1109,23 @@ void netdev_state_change(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_state_change);
 
-int netdev_bonding_change(struct net_device *dev, unsigned long event)
+/**
+ *     netdev_notify_peers - notify network peers about existence of @dev
+ *     @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netdev_notify_peers(struct net_device *dev)
 {
-       return call_netdevice_notifiers(event, dev);
+       rtnl_lock();
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+       rtnl_unlock();
 }
-EXPORT_SYMBOL(netdev_bonding_change);
+EXPORT_SYMBOL(netdev_notify_peers);
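
Note: netdev_bonding_change() is replaced by netdev_notify_peers(), which now takes the RTNL lock itself. A hedged usage sketch for a driver reacting to a failover-style event (driver and callback are hypothetical):

    /* The caller must NOT already hold rtnl_lock(), since
     * netdev_notify_peers() acquires it internally. */
    static void example_failover_complete(struct net_device *dev)
    {
            netif_carrier_on(dev);
            netdev_notify_peers(dev);       /* e.g. emits a gratuitous ARP */
    }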
 
 /**
  *     dev_load        - load a network module
@@ -1394,7 +1406,6 @@ rollback:
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
                }
        }
 
@@ -1436,7 +1447,6 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
                }
        }
 unlock:
@@ -4510,8 +4520,8 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
        unsigned int old_flags = dev->flags;
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
 
        ASSERT_RTNL();
 
@@ -4543,7 +4553,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
                                dev->name, (dev->flags & IFF_PROMISC),
                                (old_flags & IFF_PROMISC),
                                audit_get_loginuid(current),
-                               uid, gid,
+                               from_kuid(&init_user_ns, uid),
+                               from_kgid(&init_user_ns, gid),
                                audit_get_sessionid(current));
                }
 
@@ -5236,12 +5247,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
  */
 static int dev_new_index(struct net *net)
 {
-       static int ifindex;
+       int ifindex = net->ifindex;
        for (;;) {
                if (++ifindex <= 0)
                        ifindex = 1;
                if (!__dev_get_by_index(net, ifindex))
-                       return ifindex;
+                       return net->ifindex = ifindex;
        }
 }
 
@@ -5319,10 +5330,6 @@ static void rollback_registered_many(struct list_head *head)
                netdev_unregister_kobject(dev);
        }
 
-       /* Process any work delayed until the end of the batch */
-       dev = list_first_entry(head, struct net_device, unreg_list);
-       call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
-
        synchronize_net();
 
        list_for_each_entry(dev, head, unreg_list)
@@ -5594,7 +5601,12 @@ int register_netdevice(struct net_device *dev)
                }
        }
 
-       dev->ifindex = dev_new_index(net);
+       ret = -EBUSY;
+       if (!dev->ifindex)
+               dev->ifindex = dev_new_index(net);
+       else if (__dev_get_by_index(net, dev->ifindex))
+               goto err_uninit;
+
        if (dev->iflink == -1)
                dev->iflink = dev->ifindex;
 
@@ -5637,6 +5649,8 @@ int register_netdevice(struct net_device *dev)
 
        set_bit(__LINK_STATE_PRESENT, &dev->state);
 
+       linkwatch_init_dev(dev);
+
        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
@@ -5770,9 +5784,12 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
                        /* Rebroadcast unregister notification */
                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-                       /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
-                        * should have already handle it the first time */
 
+                       __rtnl_unlock();
+                       rcu_barrier();
+                       rtnl_lock();
+
+                       call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
                                     &dev->state)) {
                                /* We must not have linkwatch events
@@ -5834,9 +5851,8 @@ void netdev_run_todo(void)
 
        __rtnl_unlock();
 
-       /* Wait for rcu callbacks to finish before attempting to drain
-        * the device list.  This usually avoids a 250ms wait.
-        */
+
+       /* Wait for rcu callbacks to finish before next phase */
        if (!list_empty(&list))
                rcu_barrier();
 
@@ -5845,6 +5861,10 @@ void netdev_run_todo(void)
                        = list_first_entry(&list, struct net_device, todo_list);
                list_del(&dev->todo_list);
 
+               rtnl_lock();
+               call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
+               __rtnl_unlock();
+
                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
                        pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
@@ -6239,7 +6259,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
           the device is just moving and can keep their slaves up.
        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-       call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+       rcu_barrier();
+       call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 
        /*
index 56d6361..f6593d2 100644 (file)
@@ -374,7 +374,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
        struct dst_entry *dst, *last = NULL;
 
        switch (event) {
-       case NETDEV_UNREGISTER:
+       case NETDEV_UNREGISTER_FINAL:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
index c3519c6..a019222 100644 (file)
@@ -76,6 +76,14 @@ static void rfc2863_policy(struct net_device *dev)
 }
 
 
+void linkwatch_init_dev(struct net_device *dev)
+{
+       /* Handle pre-registration link state changes */
+       if (!netif_carrier_ok(dev) || netif_dormant(dev))
+               rfc2863_policy(dev);
+}
+
+
 static bool linkwatch_urgent_event(struct net_device *dev)
 {
        if (!netif_running(dev))
index e4ba3e7..dd67818 100644 (file)
@@ -380,6 +380,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
+       static atomic_t ip_ident;
 
        udp_len = len + sizeof(*udph);
        ip_len = udp_len + sizeof(*iph);
@@ -415,7 +416,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
-       iph->id       = 0;
+       iph->id       = htons(atomic_inc_return(&ip_ident));
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
index 2c5a0a0..c64efcf 100644 (file)
@@ -618,7 +618,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
                       long expires, u32 error)
 {
        struct rta_cacheinfo ci = {
-               .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+               .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
                .rta_used = dst->__use,
                .rta_clntref = atomic_read(&(dst->__refcnt)),
                .rta_error = error,
@@ -1812,8 +1812,6 @@ replay:
                        return -ENODEV;
                }
 
-               if (ifm->ifi_index)
-                       return -EOPNOTSUPP;
                if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
                        return -EOPNOTSUPP;
 
@@ -1839,10 +1837,14 @@ replay:
                        return PTR_ERR(dest_net);
 
                dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
-
-               if (IS_ERR(dev))
+               if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
-               else if (ops->newlink)
+                       goto out;
+               }
+
+               dev->ifindex = ifm->ifi_index;
+
+               if (ops->newlink)
                        err = ops->newlink(net, dev, tb, data);
                else
                        err = register_netdevice(dev);
@@ -2356,7 +2358,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
        case NETDEV_PRE_TYPE_CHANGE:
        case NETDEV_GOING_DOWN:
        case NETDEV_UNREGISTER:
-       case NETDEV_UNREGISTER_BATCH:
+       case NETDEV_UNREGISTER_FINAL:
        case NETDEV_RELEASE:
        case NETDEV_JOIN:
                break;
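
Note: NETDEV_UNREGISTER_BATCH is gone; notifiers that must wait for RCU readers now receive NETDEV_UNREGISTER_FINAL, which the core only sends after an rcu_barrier(), as the net/core/dev.c hunks above show. A hypothetical out-of-tree notifier would split its teardown like this (sketch only; at this point the notifier still gets the net_device directly as 'ptr'):

    static int example_netdev_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
            struct net_device *dev = ptr;

            switch (event) {
            case NETDEV_UNREGISTER:
                    pr_debug("%s: stop taking new references\n", dev->name);
                    break;
            case NETDEV_UNREGISTER_FINAL:
                    pr_debug("%s: rcu_barrier() done, free per-device state\n",
                             dev->name);
                    break;
            }
            return NOTIFY_DONE;
    }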
index 040cebe..6ab491d 100644 (file)
 static __inline__ int scm_check_creds(struct ucred *creds)
 {
        const struct cred *cred = current_cred();
+       kuid_t uid = make_kuid(cred->user_ns, creds->uid);
+       kgid_t gid = make_kgid(cred->user_ns, creds->gid);
+
+       if (!uid_valid(uid) || !gid_valid(gid))
+               return -EINVAL;
 
        if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) &&
-           ((creds->uid == cred->uid   || creds->uid == cred->euid ||
-             creds->uid == cred->suid) || capable(CAP_SETUID)) &&
-           ((creds->gid == cred->gid   || creds->gid == cred->egid ||
-             creds->gid == cred->sgid) || capable(CAP_SETGID))) {
+           ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
+             uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) &&
+           ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
+             gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) {
               return 0;
        }
        return -EPERM;
@@ -149,6 +154,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
                                goto error;
                        break;
                case SCM_CREDENTIALS:
+               {
+                       kuid_t uid;
+                       kgid_t gid;
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
                                goto error;
                        memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
@@ -166,22 +174,29 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
                                p->pid = pid;
                        }
 
+                       err = -EINVAL;
+                       uid = make_kuid(current_user_ns(), p->creds.uid);
+                       gid = make_kgid(current_user_ns(), p->creds.gid);
+                       if (!uid_valid(uid) || !gid_valid(gid))
+                               goto error;
+
                        if (!p->cred ||
-                           (p->cred->euid != p->creds.uid) ||
-                           (p->cred->egid != p->creds.gid)) {
+                           !uid_eq(p->cred->euid, uid) ||
+                           !gid_eq(p->cred->egid, gid)) {
                                struct cred *cred;
                                err = -ENOMEM;
                                cred = prepare_creds();
                                if (!cred)
                                        goto error;
 
-                               cred->uid = cred->euid = p->creds.uid;
-                               cred->gid = cred->egid = p->creds.gid;
+                               cred->uid = cred->euid = uid;
+                               cred->gid = cred->egid = gid;
                                if (p->cred)
                                        put_cred(p->cred);
                                p->cred = cred;
                        }
                        break;
+               }
                default:
                        goto error;
                }
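
Note: the SCM_CREDENTIALS path now converts the uid/gid supplied by userspace into kuid_t/kgid_t before comparing or storing them, and rejects ids that have no mapping in the sender's user namespace. The conversion pattern used throughout these hunks, in a small hypothetical helper:

    /* Sketch of the namespace-aware id handling: map in, validate, compare
     * with uid_eq()/gid_eq(), and map back out with the _munged variants. */
    static int example_check_uid(uid_t uid)
    {
            kuid_t kuid = make_kuid(current_user_ns(), uid);

            if (!uid_valid(kuid))
                    return -EINVAL;         /* no mapping in this namespace */

            return uid_eq(kuid, current_euid()) ? 0 : -EPERM;
    }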
index 8f67ced..d765156 100644 (file)
@@ -868,8 +868,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
        if (cred) {
                struct user_namespace *current_ns = current_user_ns();
 
-               ucred->uid = from_kuid(current_ns, cred->euid);
-               ucred->gid = from_kgid(current_ns, cred->egid);
+               ucred->uid = from_kuid_munged(current_ns, cred->euid);
+               ucred->gid = from_kgid_munged(current_ns, cred->egid);
        }
 }
 EXPORT_SYMBOL_GPL(cred_to_ucred);
@@ -1230,7 +1230,7 @@ void sock_update_classid(struct sock *sk)
        rcu_read_lock();  /* doing current task, which cannot vanish. */
        classid = task_cls_classid(current);
        rcu_read_unlock();
-       if (classid && classid != sk->sk_classid)
+       if (classid != sk->sk_classid)
                sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
@@ -1527,12 +1527,12 @@ void sock_edemux(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_edemux);
 
-int sock_i_uid(struct sock *sk)
+kuid_t sock_i_uid(struct sock *sk)
 {
-       int uid;
+       kuid_t uid;
 
        read_lock_bh(&sk->sk_callback_lock);
-       uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+       uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
        read_unlock_bh(&sk->sk_callback_lock);
        return uid;
 }
index 85a3604..c855e8d 100644 (file)
@@ -961,7 +961,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
                .saddr = oldflp->saddr,
                .flowidn_scope = RT_SCOPE_UNIVERSE,
                .flowidn_mark = oldflp->flowidn_mark,
-               .flowidn_iif = init_net.loopback_dev->ifindex,
+               .flowidn_iif = LOOPBACK_IFINDEX,
                .flowidn_oif = oldflp->flowidn_oif,
        };
        struct dn_route *rt = NULL;
@@ -979,7 +979,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
                       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
                       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
                       le16_to_cpu(oldflp->saddr),
-                      oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
+                      oldflp->flowidn_mark, LOOPBACK_IFINDEX,
                       oldflp->flowidn_oif);
 
        /* If we have an output interface, verify its a DECnet device */
@@ -1042,7 +1042,7 @@ source_ok:
                        if (!fld.daddr)
                                goto out;
                }
-               fld.flowidn_oif = init_net.loopback_dev->ifindex;
+               fld.flowidn_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                goto make_route;
        }
index fe4582c..6681ccf 100644 (file)
@@ -1364,7 +1364,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
-       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+       if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
        id = ntohl(*(__be32 *)&iph->id);
@@ -1380,7 +1380,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                iph2 = ip_hdr(p);
 
                if ((iph->protocol ^ iph2->protocol) |
-                   (iph->tos ^ iph2->tos) |
                    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
                    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
@@ -1390,6 +1389,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                /* All fields must match except length and checksum. */
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
+                       (iph->tos ^ iph2->tos) |
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
                NAPI_GRO_CB(p)->flush |= flush;
index 44bf82e..adf273f 100644 (file)
@@ -94,25 +94,22 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
        [IFA_LABEL]             = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
-/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
- * value.  So if you change this define, make appropriate changes to
- * inet_addr_hash as well.
- */
-#define IN4_ADDR_HSIZE 256
+#define IN4_ADDR_HSIZE_SHIFT   8
+#define IN4_ADDR_HSIZE         (1U << IN4_ADDR_HSIZE_SHIFT)
+
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
-static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
-       u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+       u32 val = (__force u32) addr ^ net_hash_mix(net);
 
-       return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
-               (IN4_ADDR_HSIZE - 1));
+       return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 }
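
Note: the hand-rolled byte fold is replaced by hash_32() from <linux/hash.h> (and hash_ptr(net, 8) by net_hash_mix(net)). hash_32(val, bits) multiplies by a 32-bit golden-ratio constant and keeps the top bits, roughly (the exact multiplier is an implementation detail, not part of this patch):

    static inline u32 example_hash_32(u32 val, unsigned int bits)
    {
            u32 hash = val * 0x9e370001U;   /* 32-bit golden-ratio prime */

            return hash >> (32 - bits);     /* index in [0, 1 << bits) */
    }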
 
 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
-       unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+       u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
        spin_lock(&inet_addr_hash_lock);
        hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -136,18 +133,18 @@ static void inet_hash_remove(struct in_ifaddr *ifa)
  */
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
-       unsigned int hash = inet_addr_hash(net, addr);
+       u32 hash = inet_addr_hash(net, addr);
        struct net_device *result = NULL;
        struct in_ifaddr *ifa;
        struct hlist_node *node;
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
-               struct net_device *dev = ifa->ifa_dev->dev;
-
-               if (!net_eq(dev_net(dev), net))
-                       continue;
                if (ifa->ifa_local == addr) {
+                       struct net_device *dev = ifa->ifa_dev->dev;
+
+                       if (!net_eq(dev_net(dev), net))
+                               continue;
                        result = dev;
                        break;
                }
@@ -182,10 +179,10 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 static void devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static inline void devinet_sysctl_register(struct in_device *idev)
+static void devinet_sysctl_register(struct in_device *idev)
 {
 }
-static inline void devinet_sysctl_unregister(struct in_device *idev)
+static void devinet_sysctl_unregister(struct in_device *idev)
 {
 }
 #endif
@@ -205,7 +202,7 @@ static void inet_rcu_free_ifa(struct rcu_head *head)
        kfree(ifa);
 }
 
-static inline void inet_free_ifa(struct in_ifaddr *ifa)
+static void inet_free_ifa(struct in_ifaddr *ifa)
 {
        call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 }
@@ -659,7 +656,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
  *     Determine a default network mask, based on the IP address.
  */
 
-static inline int inet_abc_len(__be32 addr)
+static int inet_abc_len(__be32 addr)
 {
        int rc = -1;    /* Something else, probably a multicast. */
 
@@ -1124,7 +1121,7 @@ skip:
        }
 }
 
-static inline bool inetdev_valid_mtu(unsigned int mtu)
+static bool inetdev_valid_mtu(unsigned int mtu)
 {
        return mtu >= 68;
 }
@@ -1239,7 +1236,7 @@ static struct notifier_block ip_netdev_notifier = {
        .notifier_call = inetdev_event,
 };
 
-static inline size_t inet_nlmsg_size(void)
+static size_t inet_nlmsg_size(void)
 {
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
               + nla_total_size(4) /* IFA_ADDRESS */
index c43ae3f..acdee32 100644 (file)
@@ -218,7 +218,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
        scope = RT_SCOPE_UNIVERSE;
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
                fl4.flowi4_oif = 0;
-               fl4.flowi4_iif = net->loopback_dev->ifindex;
+               fl4.flowi4_iif = LOOPBACK_IFINDEX;
                fl4.daddr = ip_hdr(skb)->saddr;
                fl4.saddr = 0;
                fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
@@ -1041,7 +1041,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
-       struct in_device *in_dev = __in_dev_get_rtnl(dev);
+       struct in_device *in_dev;
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_UNREGISTER) {
@@ -1050,8 +1050,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                return NOTIFY_DONE;
        }
 
-       if (!in_dev)
-               return NOTIFY_DONE;
+       in_dev = __in_dev_get_rtnl(dev);
 
        switch (event) {
        case NETDEV_UP:
@@ -1062,16 +1061,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                fib_sync_up(dev);
 #endif
                atomic_inc(&net->ipv4.dev_addr_genid);
-               rt_cache_flush(dev_net(dev), -1);
+               rt_cache_flush(net, -1);
                break;
        case NETDEV_DOWN:
                fib_disable_ip(dev, 0, 0);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGE:
-               rt_cache_flush(dev_net(dev), 0);
-               break;
-       case NETDEV_UNREGISTER_BATCH:
+               rt_cache_flush(net, 0);
                break;
        }
        return NOTIFY_DONE;
index 57bd978..3c820da 100644 (file)
@@ -1550,7 +1550,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                 * state.directly.
                 */
                if (pref_mismatch) {
-                       int mp = KEYLENGTH - fls(pref_mismatch);
+                       /* fls(x) = __fls(x) + 1 */
+                       int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
 
                        if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
                                goto backtrace;
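
Note: the rewrite relies on fls(x) == __fls(x) + 1 for any non-zero x, and pref_mismatch is known to be non-zero in this branch. For example, with pref_mismatch = 0x20 (only bit 5 set), fls() returns 6 and __fls() returns 5, so KEYLENGTH - fls(x) and KEYLENGTH - __fls(x) - 1 both evaluate to KEYLENGTH - 6; using __fls() simply avoids the redundant +1/-1 round trip.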
@@ -1655,7 +1656,12 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        if (!l)
                return -ESRCH;
 
-       fa_head = get_fa_head(l, plen);
+       li = find_leaf_info(l, plen);
+
+       if (!li)
+               return -ESRCH;
+
+       fa_head = &li->falh;
        fa = fib_find_alias(fa_head, tos, 0);
 
        if (!fa)
@@ -1691,9 +1697,6 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
 
-       l = fib_find_node(t, key);
-       li = find_leaf_info(l, plen);
-
        list_del_rcu(&fa->fa_list);
 
        if (!plen)
index 6699f23..0b5580c 100644 (file)
@@ -2435,6 +2435,8 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                struct ip_mc_list *im = (struct ip_mc_list *)v;
                struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
                char   *querier;
+               long delta;
+
 #ifdef CONFIG_IP_MULTICAST
                querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
                          IGMP_V2_SEEN(state->in_dev) ? "V2" :
@@ -2448,11 +2450,12 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                                   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
                }
 
+               delta = im->timer.expires - jiffies;
                seq_printf(seq,
                           "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
                           im->multiaddr, im->users,
-                          im->tm_running, im->tm_running ?
-                          jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
+                          im->tm_running,
+                          im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
                           im->reporter);
        }
        return 0;
index 570e61f..8bc005b 100644 (file)
@@ -69,6 +69,7 @@ static inline void inet_diag_unlock_handler(
 
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
+                             struct user_namespace *user_ns,
                              u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
@@ -124,7 +125,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        }
 #endif
 
-       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);
 
        if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -199,11 +200,12 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
 
 static int inet_csk_diag_fill(struct sock *sk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
+                             struct user_namespace *user_ns,
                              u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        return inet_sk_diag_fill(sk, inet_csk(sk),
-                       skb, req, pid, seq, nlmsg_flags, unlh);
+                       skb, req, user_ns, pid, seq, nlmsg_flags, unlh);
 }
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
@@ -256,14 +258,16 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-                       struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
+                       struct inet_diag_req_v2 *r,
+                       struct user_namespace *user_ns,
+                       u32 pid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
                                           skb, r, pid, seq, nlmsg_flags,
                                           unlh);
-       return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
+       return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh);
 }
 
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -311,6 +315,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
        }
 
        err = sk_diag_fill(sk, rep, req,
+                          sk_user_ns(NETLINK_CB(in_skb).ssk),
                           NETLINK_CB(in_skb).pid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
@@ -551,6 +556,7 @@ static int inet_csk_diag_dump(struct sock *sk,
                return 0;
 
        return inet_csk_diag_fill(sk, skb, r,
+                                 sk_user_ns(NETLINK_CB(cb->skb).ssk),
                                  NETLINK_CB(cb->skb).pid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
@@ -591,7 +597,9 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
 }
 
 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
-                             struct request_sock *req, u32 pid, u32 seq,
+                             struct request_sock *req,
+                             struct user_namespace *user_ns,
+                             u32 pid, u32 seq,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
@@ -625,7 +633,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
-       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = 0;
 #if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
@@ -702,6 +710,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                        }
 
                        err = inet_diag_fill_req(skb, sk, req,
+                                              sk_user_ns(NETLINK_CB(cb->skb).ssk),
                                               NETLINK_CB(cb->skb).pid,
                                               cb->nlh->nlmsg_seq, cb->nlh);
                        if (err < 0) {
index ebdf06f..8aa7a4c 100644 (file)
@@ -1808,7 +1808,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
                .flowi4_oif = (rt_is_output_route(rt) ?
                               skb->dev->ifindex : 0),
                .flowi4_iif = (rt_is_output_route(rt) ?
-                              net->loopback_dev->ifindex :
+                              LOOPBACK_IFINDEX :
                               skb->dev->ifindex),
                .flowi4_mark = skb->mark,
        };
index 31371be..c301300 100644 (file)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        return ipv4_is_local_multicast(iph->daddr) ^ invert;
                flow.flowi4_iif = 0;
        } else {
-               flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+               flow.flowi4_iif = LOOPBACK_IFINDEX;
        }
 
        flow.daddr = iph->saddr;
index 851acec..6b3da5c 100644 (file)
@@ -69,9 +69,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
        net->ipv4.iptable_filter =
                ipt_register_table(net, &packet_filter, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_filter))
-               return PTR_ERR(net->ipv4.iptable_filter);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_filter);
 }
 
 static void __net_exit iptable_filter_net_exit(struct net *net)
@@ -96,14 +94,10 @@ static int __init iptable_filter_init(void)
        filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
        if (IS_ERR(filter_ops)) {
                ret = PTR_ERR(filter_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_filter_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_filter_net_ops);
-       return ret;
 }
 
 static void __exit iptable_filter_fini(void)
index aef5d1f..85d88f2 100644 (file)
@@ -104,9 +104,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
        net->ipv4.iptable_mangle =
                ipt_register_table(net, &packet_mangler, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_mangle))
-               return PTR_ERR(net->ipv4.iptable_mangle);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_mangle);
 }
 
 static void __net_exit iptable_mangle_net_exit(struct net *net)
@@ -131,14 +129,10 @@ static int __init iptable_mangle_init(void)
        mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
        if (IS_ERR(mangle_ops)) {
                ret = PTR_ERR(mangle_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_mangle_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_mangle_net_ops);
-       return ret;
 }
 
 static void __exit iptable_mangle_fini(void)
index 07fb710..03d9696 100644 (file)
@@ -48,9 +48,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
        net->ipv4.iptable_raw =
                ipt_register_table(net, &packet_raw, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_raw))
-               return PTR_ERR(net->ipv4.iptable_raw);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_raw);
 }
 
 static void __net_exit iptable_raw_net_exit(struct net *net)
@@ -75,14 +73,10 @@ static int __init iptable_raw_init(void)
        rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
        if (IS_ERR(rawtable_ops)) {
                ret = PTR_ERR(rawtable_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_raw_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_raw_net_ops);
-       return ret;
 }
 
 static void __exit iptable_raw_fini(void)
index be45bdc..b283d8e 100644 (file)
@@ -66,10 +66,7 @@ static int __net_init iptable_security_net_init(struct net *net)
        net->ipv4.iptable_security =
                ipt_register_table(net, &security_table, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_security))
-               return PTR_ERR(net->ipv4.iptable_security);
-
-       return 0;
+       return PTR_RET(net->ipv4.iptable_security);
 }
 
 static void __net_exit iptable_security_net_exit(struct net *net)
index 6232d47..8f3d054 100644 (file)
@@ -185,10 +185,10 @@ exit:
        return sk;
 }
 
-static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
-                                         gid_t *high)
+static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
+                                         kgid_t *high)
 {
-       gid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.sysctl_ping_group_range;
        unsigned int seq;
 
        do {
@@ -203,19 +203,13 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
 static int ping_init_sock(struct sock *sk)
 {
        struct net *net = sock_net(sk);
-       gid_t group = current_egid();
-       gid_t range[2];
+       kgid_t group = current_egid();
        struct group_info *group_info = get_current_groups();
        int i, j, count = group_info->ngroups;
        kgid_t low, high;
 
-       inet_get_ping_group_range_net(net, range, range+1);
-       low = make_kgid(&init_user_ns, range[0]);
-       high = make_kgid(&init_user_ns, range[1]);
-       if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
-               return -EACCES;
-
-       if (range[0] <= group && group <= range[1])
+       inet_get_ping_group_range_net(net, &low, &high);
+       if (gid_lte(low, group) && gid_lte(group, high))
                return 0;
 
        for (i = 0; i < group_info->nblocks; i++) {
@@ -845,7 +839,9 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
 }
index ff0f071..f242578 100644 (file)
@@ -992,7 +992,9 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
                i, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
 
index 82cf2a7..d2d1e15 100644 (file)
@@ -1589,11 +1589,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        if (ipv4_is_zeronet(daddr))
                goto martian_destination;
 
-       if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
-               if (ipv4_is_loopback(daddr))
+       /* Call IN_DEV_NET_ROUTE_LOCALNET() at most once, and only when
+        * daddr and/or saddr is a loopback address
+        */
+       if (ipv4_is_loopback(daddr)) {
+               if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_destination;
-
-               if (ipv4_is_loopback(saddr))
+       } else if (ipv4_is_loopback(saddr)) {
+               if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_source;
        }
 
@@ -1618,7 +1621,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        if (res.type == RTN_LOCAL) {
                err = fib_validate_source(skb, saddr, daddr, tos,
-                                         net->loopback_dev->ifindex,
+                                         LOOPBACK_IFINDEX,
                                          dev, in_dev, &itag);
                if (err < 0)
                        goto martian_source_keep_err;
@@ -1894,7 +1897,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 
        orig_oif = fl4->flowi4_oif;
 
-       fl4->flowi4_iif = net->loopback_dev->ifindex;
+       fl4->flowi4_iif = LOOPBACK_IFINDEX;
        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -1983,7 +1986,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                if (!fl4->daddr)
                        fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
                dev_out = net->loopback_dev;
-               fl4->flowi4_oif = net->loopback_dev->ifindex;
+               fl4->flowi4_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                flags |= RTCF_LOCAL;
                goto make_route;
index 1b5ce96..3e78c79 100644 (file)
@@ -76,9 +76,9 @@ static int ipv4_local_port_range(ctl_table *table, int write,
 }
 
 
-static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
-       gid_t *data = table->data;
+       kgid_t *data = table->data;
        unsigned int seq;
        do {
                seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -89,12 +89,12 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low,
 }
 
 /* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
+static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 {
-       gid_t *data = table->data;
+       kgid_t *data = table->data;
        write_seqlock(&sysctl_local_ports.lock);
-       data[0] = range[0];
-       data[1] = range[1];
+       data[0] = low;
+       data[1] = high;
        write_sequnlock(&sysctl_local_ports.lock);
 }
 
@@ -103,21 +103,33 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
+       struct user_namespace *user_ns = current_user_ns();
        int ret;
-       gid_t range[2];
+       gid_t urange[2];
+       kgid_t low, high;
        ctl_table tmp = {
-               .data = &range,
-               .maxlen = sizeof(range),
+               .data = &urange,
+               .maxlen = sizeof(urange),
                .mode = table->mode,
                .extra1 = &ip_ping_group_range_min,
                .extra2 = &ip_ping_group_range_max,
        };
 
-       inet_get_ping_group_range_table(table, range, range + 1);
+       inet_get_ping_group_range_table(table, &low, &high);
+       urange[0] = from_kgid_munged(user_ns, low);
+       urange[1] = from_kgid_munged(user_ns, high);
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
-       if (write && ret == 0)
-               set_ping_group_range(table, range);
+       if (write && ret == 0) {
+               low = make_kgid(user_ns, urange[0]);
+               high = make_kgid(user_ns, urange[1]);
+               if (!gid_valid(low) || !gid_valid(high) ||
+                   (urange[1] < urange[0]) || gid_lt(high, low)) {
+                       low = make_kgid(&init_user_ns, 1);
+                       high = make_kgid(&init_user_ns, 0);
+               }
+               set_ping_group_range(table, low, high);
+       }
 
        return ret;
 }
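
Note: the fallback low = gid 1, high = gid 0 written above deliberately encodes an empty range: ping_init_sock() accepts a group only when gid_lte(low, group) && gid_lte(group, high), i.e. group >= 1 and group <= 0, which can never both hold. That matches the "nobody may create ping sockets" default installed in ipv4_sysctl_init_net() further down.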
@@ -786,7 +798,7 @@ static struct ctl_table ipv4_net_table[] = {
        {
                .procname       = "ping_group_range",
                .data           = &init_net.ipv4.sysctl_ping_group_range,
-               .maxlen         = sizeof(init_net.ipv4.sysctl_ping_group_range),
+               .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
        },
@@ -830,8 +842,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
         * Sane defaults - nobody may create ping sockets.
         * Boot scripts should set this to distro-specific group.
         */
-       net->ipv4.sysctl_ping_group_range[0] = 1;
-       net->ipv4.sysctl_ping_group_range[1] = 0;
+       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
 
        tcp_init_mem(net);
 
index 6e38c6c..ce4ffe9 100644 (file)
@@ -237,7 +237,11 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
                        tcp_enter_quickack_mode((struct sock *)tp);
                break;
        case INET_ECN_CE:
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+                       /* Better not delay acks; the sender may have a very low cwnd */
+                       tcp_enter_quickack_mode((struct sock *)tp);
+                       tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               }
                /* fall through */
        default:
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -5741,7 +5745,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                TCP_ECN_rcv_synack(tp, th);
 
-               tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
                /* Ok.. it's good. Set up sequence numbers and
@@ -5754,7 +5758,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
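
tcp_init_wl() only seeds snd_wl1, the sequence number later consulted when deciding whether an incoming segment may update the send window; the hunk above moves that seeding ahead of tcp_ack() so the window carried in the SYN-ACK is accepted. For reference, the helper is assumed to be roughly:

    /* paraphrased from include/net/tcp.h */
    static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
    {
            tp->snd_wl1 = seq;
    }
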
index 00a748d..36f02f9 100644 (file)
@@ -2393,10 +2393,10 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
-                        struct seq_file *f, int i, int uid, int *len)
+                        struct seq_file *f, int i, kuid_t uid, int *len)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
-       int ttd = req->expires - jiffies;
+       long delta = req->expires - jiffies;
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
@@ -2408,9 +2408,9 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
-               jiffies_to_clock_t(ttd),
+               jiffies_delta_to_clock_t(delta),
                req->retrans,
-               uid,
+               from_kuid_munged(seq_user_ns(f), uid),
                0,  /* non standard timer */
                0, /* open_requests have no inode */
                atomic_read(&sk->sk_refcnt),
@@ -2459,9 +2459,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
-               jiffies_to_clock_t(timer_expires - jiffies),
+               jiffies_delta_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
-               sock_i_uid(sk),
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                atomic_read(&sk->sk_refcnt), sk,
@@ -2478,10 +2478,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 {
        __be32 dest, src;
        __u16 destp, srcp;
-       int ttd = tw->tw_ttd - jiffies;
-
-       if (ttd < 0)
-               ttd = 0;
+       long delta = tw->tw_ttd - jiffies;
 
        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
@@ -2491,7 +2488,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
-               3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+               3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw, len);
 }
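
jiffies_delta_to_clock_t() folds the manual clamping of negative deltas (the removed "if (ttd < 0) ttd = 0;") into one helper before converting to clock ticks. Its assumed definition, roughly:

    /* assumed, from include/linux/jiffies.h of this series */
    static inline clock_t jiffies_delta_to_clock_t(long delta)
    {
            return jiffies_to_clock_t(max(0L, delta));
    }
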
 
index 6f6d1ac..c4e6432 100644 (file)
@@ -2110,7 +2110,9 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
 }
index 16d0960..d2f336e 100644 (file)
@@ -24,7 +24,9 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
        if (!inet_diag_bc_sk(bc, sk))
                return 0;
 
-       return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+       return inet_sk_diag_fill(sk, NULL, skb, req,
+                       sk_user_ns(NETLINK_CB(cb->skb).ssk),
+                       NETLINK_CB(cb->skb).pid,
                        cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
@@ -69,6 +71,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out;
 
        err = inet_sk_diag_fill(sk, NULL, rep, req,
+                          sk_user_ns(NETLINK_CB(in_skb).ssk),
                           NETLINK_CB(in_skb).pid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
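
inet_sk_diag_fill() grows a user-namespace argument so reported uids can be translated for the requesting socket; sk_user_ns() itself comes from a companion net/sock.h change that is not shown here. A sketch of what it is assumed to resolve to:

    /* assumed shape of the companion helper in include/net/sock.h */
    static inline struct user_namespace *sk_user_ns(struct sock *sk)
    {
            /* only valid while the opening socket and its creds are pinned */
            return sk->sk_socket->file->f_cred->user_ns;
    }
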
index 5728695..4f7fe72 100644 (file)
@@ -201,6 +201,22 @@ config IPV6_TUNNEL
 
          If unsure, say N.
 
+config IPV6_GRE
+       tristate "IPv6: GRE tunnel"
+       select IPV6_TUNNEL
+       ---help---
+         Tunneling means encapsulating data of one protocol type within
+         another protocol and sending it over a channel that understands the
+         encapsulating protocol. This particular tunneling driver implements
+         GRE (Generic Routing Encapsulation) and at this time allows
+         encapsulation of IPv4 or IPv6 over existing IPv6 infrastructure.
+         This driver is useful if the other endpoint is a Cisco router: Cisco
+         likes GRE much better than the other Linux tunneling driver ("IP
+         tunneling" above). In addition, GRE allows multicast redistribution
+         through the tunnel.
+
+         Saying M here will produce a module called ip6_gre. If unsure, say N.
+
 config IPV6_MULTIPLE_TABLES
        bool "IPv6: Multiple Routing Tables"
        depends on EXPERIMENTAL
index 686934a..b6d3f79 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_NETFILTER)       += netfilter/
 
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
 
 obj-y += addrconf_core.o exthdrs_core.o
 
index 9772fbd..90bbefb 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/pid_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -91,6 +92,8 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
 static void fl_free(struct ip6_flowlabel *fl)
 {
        if (fl) {
+               if (fl->share == IPV6_FL_S_PROCESS)
+                       put_pid(fl->owner.pid);
                release_net(fl->fl_net);
                kfree(fl->opt);
        }
@@ -394,10 +397,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
        case IPV6_FL_S_ANY:
                break;
        case IPV6_FL_S_PROCESS:
-               fl->owner = current->pid;
+               fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
                break;
        case IPV6_FL_S_USER:
-               fl->owner = current_euid();
+               fl->owner.uid = current_euid();
                break;
        default:
                err = -EINVAL;
@@ -561,7 +564,10 @@ recheck:
                                err = -EPERM;
                                if (fl1->share == IPV6_FL_S_EXCL ||
                                    fl1->share != fl->share ||
-                                   fl1->owner != fl->owner)
+                                   ((fl1->share == IPV6_FL_S_PROCESS) &&
+                                    (fl1->owner.pid != fl->owner.pid)) ||
+                                   ((fl1->share == IPV6_FL_S_USER) &&
+                                    !uid_eq(fl1->owner.uid, fl->owner.uid)))
                                        goto release;
 
                                err = -EINVAL;
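
The .pid/.uid accessors above rely on the flowlabel owner becoming a union in a companion include/net/ipv6.h change that is not part of this section; the assumed layout is:

    /* assumed companion change inside struct ip6_flowlabel */
    union {
            struct pid *pid;        /* share == IPV6_FL_S_PROCESS */
            kuid_t      uid;        /* share == IPV6_FL_S_USER */
    } owner;
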
@@ -621,6 +627,7 @@ done:
 
 struct ip6fl_iter_state {
        struct seq_net_private p;
+       struct pid_namespace *pid_ns;
        int bucket;
 };
 
@@ -699,6 +706,7 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v)
 
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
+       struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
                           "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
@@ -708,7 +716,11 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
                           "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
                           (unsigned int)ntohl(fl->label),
                           fl->share,
-                          (int)fl->owner,
+                          ((fl->share == IPV6_FL_S_PROCESS) ?
+                           pid_nr_ns(fl->owner.pid, state->pid_ns) :
+                           ((fl->share == IPV6_FL_S_USER) ?
+                            from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
+                            0)),
                           atomic_read(&fl->users),
                           fl->linger/HZ,
                           (long)(fl->expires - jiffies)/HZ,
@@ -727,8 +739,29 @@ static const struct seq_operations ip6fl_seq_ops = {
 
 static int ip6fl_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open_net(inode, file, &ip6fl_seq_ops,
-                           sizeof(struct ip6fl_iter_state));
+       struct seq_file *seq;
+       struct ip6fl_iter_state *state;
+       int err;
+
+       err = seq_open_net(inode, file, &ip6fl_seq_ops,
+                          sizeof(struct ip6fl_iter_state));
+
+       if (!err) {
+               seq = file->private_data;
+               state = ip6fl_seq_private(seq);
+               rcu_read_lock();
+               state->pid_ns = get_pid_ns(task_active_pid_ns(current));
+               rcu_read_unlock();
+       }
+       return err;
+}
+
+static int ip6fl_seq_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
+       put_pid_ns(state->pid_ns);
+       return seq_release_net(inode, file);
 }
 
 static const struct file_operations ip6fl_seq_fops = {
@@ -736,7 +769,7 @@ static const struct file_operations ip6fl_seq_fops = {
        .open           =       ip6fl_seq_open,
        .read           =       seq_read,
        .llseek         =       seq_lseek,
-       .release        =       seq_release_net,
+       .release        =       ip6fl_seq_release,
 };
 
 static int __net_init ip6_flowlabel_proc_init(struct net *net)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
new file mode 100644 (file)
index 0000000..424d11a
--- /dev/null
@@ -0,0 +1,1792 @@
+/*
+ *     GRE over IPv6 protocol decoder.
+ *
+ *     Authors: Dmitry Kozlov (xeb@mail.ru)
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/hash.h>
+#include <linux/if_tunnel.h>
+#include <linux/ip6_tunnel.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/protocol.h>
+#include <net/addrconf.h>
+#include <net/arp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
+
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#include <net/ip6_tunnel.h>
+
+
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static int ip6gre_net_id __read_mostly;
+struct ip6gre_net {
+       struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];
+
+       struct net_device *fb_tunnel_dev;
+};
+
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static int ip6gre_tunnel_init(struct net_device *dev);
+static void ip6gre_tunnel_setup(struct net_device *dev);
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+
+/* Tunnel hash table */
+
+/*
+   4 hash tables:
+
+   3: (remote,local)
+   2: (remote,*)
+   1: (*,local)
+   0: (*,*)
+
+   We require an exact key match, i.e. if a key is present in the packet
+   it will match only a tunnel with the same key; if it is not present,
+   it will match only a keyless tunnel.
+
+   All keyless packets that do not match a configured keyless tunnel
+   will match the fallback tunnel.
+ */
+
+#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
+static u32 HASH_ADDR(const struct in6_addr *addr)
+{
+       u32 hash = ipv6_addr_hash(addr);
+
+       return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+#define tunnels_r_l    tunnels[3]
+#define tunnels_r      tunnels[2]
+#define tunnels_l      tunnels[1]
+#define tunnels_wc     tunnels[0]
+/*
+ * Locking : hash tables are protected by RCU and RTNL
+ */
+
+#define for_each_ip_tunnel_rcu(start) \
+       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
+static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
+               struct rtnl_link_stats64 *tot)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+               u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       rx_packets = tstats->rx_packets;
+                       tx_packets = tstats->tx_packets;
+                       rx_bytes = tstats->rx_bytes;
+                       tx_bytes = tstats->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+               tot->rx_packets += rx_packets;
+               tot->tx_packets += tx_packets;
+               tot->rx_bytes   += rx_bytes;
+               tot->tx_bytes   += tx_bytes;
+       }
+
+       tot->multicast = dev->stats.multicast;
+       tot->rx_crc_errors = dev->stats.rx_crc_errors;
+       tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+       tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_errors = dev->stats.rx_errors;
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+       tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+       tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+       tot->tx_errors = dev->stats.tx_errors;
+
+       return tot;
+}
+
+/* Given src, dst and key, find appropriate for input tunnel. */
+
+static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
+               const struct in6_addr *remote, const struct in6_addr *local,
+               __be32 key, __be16 gre_proto)
+{
+       struct net *net = dev_net(dev);
+       int link = dev->ifindex;
+       unsigned int h0 = HASH_ADDR(remote);
+       unsigned int h1 = HASH_KEY(key);
+       struct ip6_tnl *t, *cand = NULL;
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+                      ARPHRD_ETHER : ARPHRD_IP6GRE;
+       int score, cand_score = 4;
+
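+       /* An exact match on both link and device type scores 0 and is
+        * returned immediately; a link mismatch costs 1, a device-type
+        * mismatch costs 2, and the lowest-scoring candidate wins.
+        * cand_score starts at 4, i.e. worse than any real candidate.
+        */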
+       for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
+               if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+                   !ipv6_addr_equal(remote, &t->parms.raddr) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
+               if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
+               if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
+                         (!ipv6_addr_equal(local, &t->parms.raddr) ||
+                                !ipv6_addr_is_multicast(local))) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
+               if (t->parms.i_key != key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       if (cand != NULL)
+               return cand;
+
+       dev = ign->fb_tunnel_dev;
+       if (dev->flags & IFF_UP)
+               return netdev_priv(dev);
+
+       return NULL;
+}
+
+static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
+               const struct __ip6_tnl_parm *p)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       unsigned int h = HASH_KEY(p->i_key);
+       int prio = 0;
+
+       if (!ipv6_addr_any(local))
+               prio |= 1;
+       if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
+               prio |= 2;
+               h ^= HASH_ADDR(remote);
+       }
+
+       return &ign->tunnels[prio][h];
+}
+
+static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
+               const struct ip6_tnl *t)
+{
+       return __ip6gre_bucket(ign, &t->parms);
+}
+
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+
+       rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+       rcu_assign_pointer(*tp, t);
+}
+
+static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *iter;
+
+       for (tp = ip6gre_bucket(ign, t);
+            (iter = rtnl_dereference(*tp)) != NULL;
+            tp = &iter->next) {
+               if (t == iter) {
+                       rcu_assign_pointer(*tp, t->next);
+                       break;
+               }
+       }
+}
+
+static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
+                                          const struct __ip6_tnl_parm *parms,
+                                          int type)
+{
+       const struct in6_addr *remote = &parms->raddr;
+       const struct in6_addr *local = &parms->laddr;
+       __be32 key = parms->i_key;
+       int link = parms->link;
+       struct ip6_tnl *t;
+       struct ip6_tnl __rcu **tp;
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       for (tp = __ip6gre_bucket(ign, parms);
+            (t = rtnl_dereference(*tp)) != NULL;
+            tp = &t->next)
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr) &&
+                   key == t->parms.i_key &&
+                   link == t->parms.link &&
+                   type == t->dev->type)
+                       break;
+
+       return t;
+}
+
+static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+               const struct __ip6_tnl_parm *parms, int create)
+{
+       struct ip6_tnl *t, *nt;
+       struct net_device *dev;
+       char name[IFNAMSIZ];
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
+       if (t || !create)
+               return t;
+
+       if (parms->name[0])
+               strlcpy(name, parms->name, IFNAMSIZ);
+       else
+               strcpy(name, "ip6gre%d");
+
+       dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
+       if (!dev)
+               return NULL;
+
+       dev_net_set(dev, net);
+
+       nt = netdev_priv(dev);
+       nt->parms = *parms;
+       dev->rtnl_link_ops = &ip6gre_link_ops;
+
+       nt->dev = dev;
+       ip6gre_tnl_link_config(nt, 1);
+
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
+
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+       return nt;
+
+failed_free:
+       free_netdev(dev);
+       return NULL;
+}
+
+static void ip6gre_tunnel_uninit(struct net_device *dev)
+{
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+       dev_put(dev);
+}
+
+
+static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+               u8 type, u8 code, int offset, __be32 info)
+{
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+       __be16 *p = (__be16 *)(skb->data + offset);
+       int grehlen = offset + 4;
+       struct ip6_tnl *t;
+       __be16 flags;
+
+       flags = p[0];
+       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+               if (flags&(GRE_VERSION|GRE_ROUTING))
+                       return;
+               if (flags&GRE_KEY) {
+                       grehlen += 4;
+                       if (flags&GRE_CSUM)
+                               grehlen += 4;
+               }
+       }
+
+       /* If only 8 bytes returned, keyed message will be dropped here */
+       if (!pskb_may_pull(skb, grehlen))
+               return;
+       ipv6h = (const struct ipv6hdr *)skb->data;
+       p = (__be16 *)(skb->data + offset);
+
+       rcu_read_lock();
+
+       t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+                               flags & GRE_KEY ?
+                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+                               p[1]);
+       if (t == NULL)
+               goto out;
+
+       switch (type) {
+               __u32 teli;
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               __u32 mtu;
+       case ICMPV6_DEST_UNREACH:
+               net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+                                    t->parms.name);
+               break;
+       case ICMPV6_TIME_EXCEED:
+               if (code == ICMPV6_EXC_HOPLIMIT) {
+                       net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+                                            t->parms.name);
+               }
+               break;
+       case ICMPV6_PARAMPROB:
+               teli = 0;
+               if (code == ICMPV6_HDR_FIELD)
+                       teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
+
+               if (teli && teli == info - 2) {
+                       tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
+                       if (tel->encap_limit == 0) {
+                               net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+                                                    t->parms.name);
+                       }
+               } else {
+                       net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+                                            t->parms.name);
+               }
+               break;
+       case ICMPV6_PKT_TOOBIG:
+               mtu = info - offset;
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+               t->dev->mtu = mtu;
+               break;
+       }
+
+       if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
+               t->err_count++;
+       else
+               t->err_count = 1;
+       t->err_time = jiffies;
+out:
+       rcu_read_unlock();
+}
+
+static inline void ip6gre_ecn_decapsulate_ipv4(const struct ip6_tnl *t,
+               const struct ipv6hdr *ipv6h, struct sk_buff *skb)
+{
+       __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
+
+       if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+               ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
+
+       if (INET_ECN_is_ce(dsfield))
+               IP_ECN_set_ce(ip_hdr(skb));
+}
+
+static inline void ip6gre_ecn_decapsulate_ipv6(const struct ip6_tnl *t,
+               const struct ipv6hdr *ipv6h, struct sk_buff *skb)
+{
+       if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+               ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
+
+       if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
+               IP6_ECN_set_ce(ipv6_hdr(skb));
+}
+
+static int ip6gre_rcv(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       u8     *h;
+       __be16    flags;
+       __sum16   csum = 0;
+       __be32 key = 0;
+       u32    seqno = 0;
+       struct ip6_tnl *tunnel;
+       int    offset = 4;
+       __be16 gre_proto;
+
+       if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
+               goto drop_nolock;
+
+       ipv6h = ipv6_hdr(skb);
+       h = skb->data;
+       flags = *(__be16 *)h;
+
+       if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
+               /* - Version must be 0.
+                  - We do not support routing headers.
+                */
+               if (flags&(GRE_VERSION|GRE_ROUTING))
+                       goto drop_nolock;
+
+               if (flags&GRE_CSUM) {
+                       switch (skb->ip_summed) {
+                       case CHECKSUM_COMPLETE:
+                               csum = csum_fold(skb->csum);
+                               if (!csum)
+                                       break;
+                               /* fall through */
+                       case CHECKSUM_NONE:
+                               skb->csum = 0;
+                               csum = __skb_checksum_complete(skb);
+                               skb->ip_summed = CHECKSUM_COMPLETE;
+                       }
+                       offset += 4;
+               }
+               if (flags&GRE_KEY) {
+                       key = *(__be32 *)(h + offset);
+                       offset += 4;
+               }
+               if (flags&GRE_SEQ) {
+                       seqno = ntohl(*(__be32 *)(h + offset));
+                       offset += 4;
+               }
+       }
+
+       gre_proto = *(__be16 *)(h + 2);
+
+       rcu_read_lock();
+       tunnel = ip6gre_tunnel_lookup(skb->dev,
+                                         &ipv6h->saddr, &ipv6h->daddr, key,
+                                         gre_proto);
+       if (tunnel) {
+               struct pcpu_tstats *tstats;
+
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+
+               if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
+                       tunnel->dev->stats.rx_dropped++;
+                       goto drop;
+               }
+
+               secpath_reset(skb);
+
+               skb->protocol = gre_proto;
+               /* WCCP version 1 and 2 protocol decoding.
+                * - Change protocol to IP
+                * - When dealing with WCCPv2, skip extra 4 bytes in GRE header
+                */
+               if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
+                       skb->protocol = htons(ETH_P_IP);
+                       if ((*(h + offset) & 0xF0) != 0x40)
+                               offset += 4;
+               }
+
+               skb->mac_header = skb->network_header;
+               __pskb_pull(skb, offset);
+               skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
+               skb->pkt_type = PACKET_HOST;
+
+               if (((flags&GRE_CSUM) && csum) ||
+                   (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
+                       tunnel->dev->stats.rx_crc_errors++;
+                       tunnel->dev->stats.rx_errors++;
+                       goto drop;
+               }
+               if (tunnel->parms.i_flags&GRE_SEQ) {
+                       if (!(flags&GRE_SEQ) ||
+                           (tunnel->i_seqno &&
+                                       (s32)(seqno - tunnel->i_seqno) < 0)) {
+                               tunnel->dev->stats.rx_fifo_errors++;
+                               tunnel->dev->stats.rx_errors++;
+                               goto drop;
+                       }
+                       tunnel->i_seqno = seqno + 1;
+               }
+
+               /* Warning: All skb pointers will be invalidated! */
+               if (tunnel->dev->type == ARPHRD_ETHER) {
+                       if (!pskb_may_pull(skb, ETH_HLEN)) {
+                               tunnel->dev->stats.rx_length_errors++;
+                               tunnel->dev->stats.rx_errors++;
+                               goto drop;
+                       }
+
+                       ipv6h = ipv6_hdr(skb);
+                       skb->protocol = eth_type_trans(skb, tunnel->dev);
+                       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+               }
+
+               tstats = this_cpu_ptr(tunnel->dev->tstats);
+               u64_stats_update_begin(&tstats->syncp);
+               tstats->rx_packets++;
+               tstats->rx_bytes += skb->len;
+               u64_stats_update_end(&tstats->syncp);
+
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               skb_reset_network_header(skb);
+               if (skb->protocol == htons(ETH_P_IP))
+                       ip6gre_ecn_decapsulate_ipv4(tunnel, ipv6h, skb);
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       ip6gre_ecn_decapsulate_ipv6(tunnel, ipv6h, skb);
+
+               netif_rx(skb);
+
+               rcu_read_unlock();
+               return 0;
+       }
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+drop:
+       rcu_read_unlock();
+drop_nolock:
+       kfree_skb(skb);
+       return 0;
+}
+
+struct ipv6_tel_txoption {
+       struct ipv6_txoptions ops;
+       __u8 dst_opt[8];
+};
+
+static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+{
+       memset(opt, 0, sizeof(struct ipv6_tel_txoption));
+
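+       /* Build an 8-byte Destination Options header carrying a Tunnel
+        * Encapsulation Limit option (type 4, length 1) followed by a PadN
+        * option.  dst_opt[1] = 0 encodes the 8-byte length; dst_opt[0]
+        * (next header) is filled in later by ipv6_push_nfrag_opts().
+        */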
+       opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
+       opt->dst_opt[3] = 1;
+       opt->dst_opt[4] = encap_limit;
+       opt->dst_opt[5] = IPV6_TLV_PADN;
+       opt->dst_opt[6] = 1;
+
+       opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
+       opt->ops.opt_nflen = 8;
+}
+
+static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
+                        struct net_device *dev,
+                        __u8 dsfield,
+                        struct flowi6 *fl6,
+                        int encap_limit,
+                        __u32 *pmtu)
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct net_device *tdev;    /* Device to other host */
+       struct ipv6hdr  *ipv6h;     /* Our new IP header */
+       unsigned int max_headroom;  /* The extra header space needed */
+       int    gre_hlen;
+       struct ipv6_tel_txoption opt;
+       int    mtu;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct net_device_stats *stats = &tunnel->dev->stats;
+       int err = -1;
+       u8 proto;
+       int pkt_len;
+       struct sk_buff *new_skb;
+
+       if (dev->type == ARPHRD_ETHER)
+               IPCB(skb)->flags = 0;
+
+       if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
+               gre_hlen = 0;
+               ipv6h = (struct ipv6hdr *)skb->data;
+               fl6->daddr = ipv6h->daddr;
+       } else {
+               gre_hlen = tunnel->hlen;
+               fl6->daddr = tunnel->parms.raddr;
+       }
+
+       if (!fl6->flowi6_mark)
+               dst = ip6_tnl_dst_check(tunnel);
+
+       if (!dst) {
+               ndst = ip6_route_output(net, NULL, fl6);
+
+               if (ndst->error)
+                       goto tx_err_link_failure;
+               ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+               if (IS_ERR(ndst)) {
+                       err = PTR_ERR(ndst);
+                       ndst = NULL;
+                       goto tx_err_link_failure;
+               }
+               dst = ndst;
+       }
+
+       tdev = dst->dev;
+
+       if (tdev == dev) {
+               stats->collisions++;
+               net_warn_ratelimited("%s: Local routing loop detected!\n",
+                                    tunnel->parms.name);
+               goto tx_err_dst_release;
+       }
+
+       mtu = dst_mtu(dst) - sizeof(*ipv6h);
+       if (encap_limit >= 0)
+               mtu -= 8;
+       if (mtu < IPV6_MIN_MTU)
+               mtu = IPV6_MIN_MTU;
+       if (skb_dst(skb))
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       if (skb->len > mtu) {
+               *pmtu = mtu;
+               err = -EMSGSIZE;
+               goto tx_err_dst_release;
+       }
+
+       if (tunnel->err_count > 0) {
+               if (time_before(jiffies,
+                               tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
+                       tunnel->err_count--;
+
+                       dst_link_failure(skb);
+               } else
+                       tunnel->err_count = 0;
+       }
+
+       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+       if (encap_limit >= 0)
+               max_headroom += 8;      /* tunnel encapsulation limit option */
+
+       if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
+           (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+               new_skb = skb_realloc_headroom(skb, max_headroom);
+               if (max_headroom > dev->needed_headroom)
+                       dev->needed_headroom = max_headroom;
+               if (!new_skb)
+                       goto tx_err_dst_release;
+
+               if (skb->sk)
+                       skb_set_owner_w(new_skb, skb->sk);
+               consume_skb(skb);
+               skb = new_skb;
+       }
+
+       skb_dst_drop(skb);
+
+       if (fl6->flowi6_mark) {
+               skb_dst_set(skb, dst);
+               ndst = NULL;
+       } else {
+               skb_dst_set_noref(skb, dst);
+       }
+
+       skb->transport_header = skb->network_header;
+
+       proto = NEXTHDR_GRE;
+       if (encap_limit >= 0) {
+               init_tel_txopt(&opt, encap_limit);
+               ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
+       }
+
+       skb_push(skb, gre_hlen);
+       skb_reset_network_header(skb);
+
+       /*
+        *      Push down and install the IP header.
+        */
+       ipv6h = ipv6_hdr(skb);
+       *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
+       dsfield = INET_ECN_encapsulate(0, dsfield);
+       ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+       ipv6h->hop_limit = tunnel->parms.hop_limit;
+       ipv6h->nexthdr = proto;
+       ipv6h->saddr = fl6->saddr;
+       ipv6h->daddr = fl6->daddr;
+
+       ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
+       ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
+                                  htons(ETH_P_TEB) : skb->protocol;
+
+       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
+               __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
+
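+               /* The optional GRE fields follow the base header in the
+                * order checksum, key, sequence; ptr starts at the last
+                * 32-bit word of the GRE header and walks backwards, so
+                * the sequence number is written first, then the key,
+                * then the checksum over the GRE header and payload.
+                */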
+               if (tunnel->parms.o_flags&GRE_SEQ) {
+                       ++tunnel->o_seqno;
+                       *ptr = htonl(tunnel->o_seqno);
+                       ptr--;
+               }
+               if (tunnel->parms.o_flags&GRE_KEY) {
+                       *ptr = tunnel->parms.o_key;
+                       ptr--;
+               }
+               if (tunnel->parms.o_flags&GRE_CSUM) {
+                       *ptr = 0;
+                       *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
+                               skb->len - sizeof(struct ipv6hdr));
+               }
+       }
+
+       nf_reset(skb);
+       pkt_len = skb->len;
+       err = ip6_local_out(skb);
+
+       if (net_xmit_eval(err) == 0) {
+               struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
+
+               tstats->tx_bytes += pkt_len;
+               tstats->tx_packets++;
+       } else {
+               stats->tx_errors++;
+               stats->tx_aborted_errors++;
+       }
+
+       if (ndst)
+               ip6_tnl_dst_store(tunnel, ndst);
+
+       return 0;
+tx_err_link_failure:
+       stats->tx_carrier_errors++;
+       dst_link_failure(skb);
+tx_err_dst_release:
+       dst_release(ndst);
+       return err;
+}
+
+static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       const struct iphdr  *iph = ip_hdr(skb);
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPIP;
+
+       dsfield = ipv4_get_dsfield(iph);
+
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+                                         & IPV6_TCLASS_MASK;
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+               fl6.flowi6_mark = skb->mark;
+
+       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (err != 0) {
+               /* XXX: send ICMP error even if DF is not set. */
+               if (err == -EMSGSIZE)
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+               return -1;
+       }
+
+       return 0;
+}
+
+static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       int encap_limit = -1;
+       __u16 offset;
+       struct flowi6 fl6;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+               return -1;
+
+       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+       if (offset > 0) {
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+               if (tel->encap_limit == 0) {
+                       icmpv6_send(skb, ICMPV6_PARAMPROB,
+                                   ICMPV6_HDR_FIELD, offset + 2);
+                       return -1;
+               }
+               encap_limit = tel->encap_limit - 1;
+       } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPV6;
+
+       dsfield = ipv6_get_dsfield(ipv6h);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+               fl6.flowi6_mark = skb->mark;
+
+       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (err != 0) {
+               if (err == -EMSGSIZE)
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   1 if conflict,
+ *   0 else
+ **/
+
+static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
+       const struct ipv6hdr *hdr)
+{
+       return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u32 mtu;
+       int err;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = skb->protocol;
+
+       err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+
+       return err;
+}
+
+static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
+       struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       int ret;
+
+       if (!ip6_tnl_xmit_ctl(t))
+               return -1;
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               ret = ip6gre_xmit_ipv4(skb, dev);
+               break;
+       case htons(ETH_P_IPV6):
+               ret = ip6gre_xmit_ipv6(skb, dev);
+               break;
+       default:
+               ret = ip6gre_xmit_other(skb, dev);
+               break;
+       }
+
+       if (ret < 0)
+               goto tx_err;
+
+       return NETDEV_TX_OK;
+
+tx_err:
+       stats->tx_errors++;
+       stats->tx_dropped++;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+       struct net_device *dev = t->dev;
+       struct __ip6_tnl_parm *p = &t->parms;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
+       int addend = sizeof(struct ipv6hdr) + 4;
+
+       if (dev->type != ARPHRD_ETHER) {
+               memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+               memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+       }
+
+       /* Set up flowi template */
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
+       fl6->flowi6_oif = p->link;
+       fl6->flowlabel = 0;
+
+       if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
+               fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
+       if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
+               fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
+
+       p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+       p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+       if (p->flags&IP6_TNL_F_CAP_XMIT &&
+                       p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
+               dev->flags |= IFF_POINTOPOINT;
+       else
+               dev->flags &= ~IFF_POINTOPOINT;
+
+       dev->iflink = p->link;
+
+       /* Precalculate GRE options length */
+       if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
+               if (t->parms.o_flags&GRE_CSUM)
+                       addend += 4;
+               if (t->parms.o_flags&GRE_KEY)
+                       addend += 4;
+               if (t->parms.o_flags&GRE_SEQ)
+                       addend += 4;
+       }
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+               int strict = (ipv6_addr_type(&p->raddr) &
+                             (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
+
+               struct rt6_info *rt = rt6_lookup(dev_net(dev),
+                                                &p->raddr, &p->laddr,
+                                                p->link, strict);
+
+               if (rt == NULL)
+                       return;
+
+               if (rt->dst.dev) {
+                       dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
+
+                       if (set_mtu) {
+                               dev->mtu = rt->dst.dev->mtu - addend;
+                               if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+                                       dev->mtu -= 8;
+
+                               if (dev->mtu < IPV6_MIN_MTU)
+                                       dev->mtu = IPV6_MIN_MTU;
+                       }
+               }
+               dst_release(&rt->dst);
+       }
+
+       t->hlen = addend;
+}
+
+static int ip6gre_tnl_change(struct ip6_tnl *t,
+       const struct __ip6_tnl_parm *p, int set_mtu)
+{
+       t->parms.laddr = p->laddr;
+       t->parms.raddr = p->raddr;
+       t->parms.flags = p->flags;
+       t->parms.hop_limit = p->hop_limit;
+       t->parms.encap_limit = p->encap_limit;
+       t->parms.flowinfo = p->flowinfo;
+       t->parms.link = p->link;
+       t->parms.proto = p->proto;
+       t->parms.i_key = p->i_key;
+       t->parms.o_key = p->o_key;
+       t->parms.i_flags = p->i_flags;
+       t->parms.o_flags = p->o_flags;
+       ip6_tnl_dst_reset(t);
+       ip6gre_tnl_link_config(t, set_mtu);
+       return 0;
+}
+
+static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
+       const struct ip6_tnl_parm2 *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->flags = u->flags;
+       p->hop_limit = u->hop_limit;
+       p->encap_limit = u->encap_limit;
+       p->flowinfo = u->flowinfo;
+       p->link = u->link;
+       p->i_key = u->i_key;
+       p->o_key = u->o_key;
+       p->i_flags = u->i_flags;
+       p->o_flags = u->o_flags;
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
+       const struct __ip6_tnl_parm *p)
+{
+       u->proto = IPPROTO_GRE;
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->flags = p->flags;
+       u->hop_limit = p->hop_limit;
+       u->encap_limit = p->encap_limit;
+       u->flowinfo = p->flowinfo;
+       u->link = p->link;
+       u->i_key = p->i_key;
+       u->o_key = p->o_key;
+       u->i_flags = p->i_flags;
+       u->o_flags = p->o_flags;
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
+static int ip6gre_tunnel_ioctl(struct net_device *dev,
+       struct ifreq *ifr, int cmd)
+{
+       int err = 0;
+       struct ip6_tnl_parm2 p;
+       struct __ip6_tnl_parm p1;
+       struct ip6_tnl *t;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       switch (cmd) {
+       case SIOCGETTUNNEL:
+               t = NULL;
+               if (dev == ign->fb_tunnel_dev) {
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       ip6gre_tnl_parm_from_user(&p1, &p);
+                       t = ip6gre_tunnel_locate(net, &p1, 0);
+               }
+               if (t == NULL)
+                       t = netdev_priv(dev);
+               ip6gre_tnl_parm_to_user(&p, &t->parms);
+               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                       err = -EFAULT;
+               break;
+
+       case SIOCADDTUNNEL:
+       case SIOCCHGTUNNEL:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       goto done;
+
+               err = -EFAULT;
+               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                       goto done;
+
+               err = -EINVAL;
+               if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
+                       goto done;
+
+               if (!(p.i_flags&GRE_KEY))
+                       p.i_key = 0;
+               if (!(p.o_flags&GRE_KEY))
+                       p.o_key = 0;
+
+               ip6gre_tnl_parm_from_user(&p1, &p);
+               t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
+
+               if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+                       if (t != NULL) {
+                               if (t->dev != dev) {
+                                       err = -EEXIST;
+                                       break;
+                               }
+                       } else {
+                               t = netdev_priv(dev);
+
+                               ip6gre_tunnel_unlink(ign, t);
+                               synchronize_net();
+                               ip6gre_tnl_change(t, &p1, 1);
+                               ip6gre_tunnel_link(ign, t);
+                               netdev_state_change(dev);
+                       }
+               }
+
+               if (t) {
+                       err = 0;
+
+                       ip6gre_tnl_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                               err = -EFAULT;
+               } else
+                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               break;
+
+       case SIOCDELTUNNEL:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       goto done;
+
+               if (dev == ign->fb_tunnel_dev) {
+                       err = -EFAULT;
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                               goto done;
+                       err = -ENOENT;
+                       ip6gre_tnl_parm_from_user(&p1, &p);
+                       t = ip6gre_tunnel_locate(net, &p1, 0);
+                       if (t == NULL)
+                               goto done;
+                       err = -EPERM;
+                       if (t == netdev_priv(ign->fb_tunnel_dev))
+                               goto done;
+                       dev = t->dev;
+               }
+               unregister_netdevice(dev);
+               err = 0;
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+done:
+       return err;
+}
+
+static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       if (new_mtu < 68 ||
+           new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
+                       unsigned short type,
+                       const void *daddr, const void *saddr, unsigned int len)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
+       __be16 *p = (__be16 *)(ipv6h+1);
+
+       *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
+       ipv6h->hop_limit = t->parms.hop_limit;
+       ipv6h->nexthdr = NEXTHDR_GRE;
+       ipv6h->saddr = t->parms.laddr;
+       ipv6h->daddr = t->parms.raddr;
+
+       p[0]            = t->parms.o_flags;
+       p[1]            = htons(type);
+
+       /*
+        *      Set the source hardware address.
+        */
+
+       if (saddr)
+               memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
+       if (daddr)
+               memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
+       if (!ipv6_addr_any(&ipv6h->daddr))
+               return t->hlen;
+
+       return -t->hlen;
+}
+
+static int ip6gre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb_mac_header(skb);
+       memcpy(haddr, &ipv6h->saddr, sizeof(struct in6_addr));
+       return sizeof(struct in6_addr);
+}
+
+static const struct header_ops ip6gre_header_ops = {
+       .create = ip6gre_header,
+       .parse  = ip6gre_header_parse,
+};
+
+static const struct net_device_ops ip6gre_netdev_ops = {
+       .ndo_init               = ip6gre_tunnel_init,
+       .ndo_uninit             = ip6gre_tunnel_uninit,
+       .ndo_start_xmit         = ip6gre_tunnel_xmit,
+       .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
+       .ndo_change_mtu         = ip6gre_tunnel_change_mtu,
+       .ndo_get_stats64        = ip6gre_get_stats64,
+};
+
+static void ip6gre_dev_free(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+       free_netdev(dev);
+}
+
+static void ip6gre_tunnel_setup(struct net_device *dev)
+{
+       struct ip6_tnl *t;
+
+       dev->netdev_ops = &ip6gre_netdev_ops;
+       dev->destructor = ip6gre_dev_free;
+
+       dev->type = ARPHRD_IP6GRE;
+       dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
+       dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
+       t = netdev_priv(dev);
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               dev->mtu -= 8;
+       dev->flags |= IFF_NOARP;
+       dev->iflink = 0;
+       dev->addr_len = sizeof(struct in6_addr);
+       dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+static int ip6gre_tunnel_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel;
+
+       tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
+       memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
+
+       if (ipv6_addr_any(&tunnel->parms.raddr))
+               dev->header_ops = &ip6gre_header_ops;
+
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void ip6gre_fb_tunnel_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
+
+       dev_hold(dev);
+}
+
+
+static struct inet6_protocol ip6gre_protocol __read_mostly = {
+       .handler     = ip6gre_rcv,
+       .err_handler = ip6gre_err,
+       .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+};
+
+static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
+       struct list_head *head)
+{
+       int prio;
+
+       for (prio = 0; prio < 4; prio++) {
+               int h;
+               for (h = 0; h < HASH_SIZE; h++) {
+                       struct ip6_tnl *t;
+
+                       t = rtnl_dereference(ign->tunnels[prio][h]);
+
+                       while (t != NULL) {
+                               unregister_netdevice_queue(t->dev, head);
+                               t = rtnl_dereference(t->next);
+                       }
+               }
+       }
+}
+
+static int __net_init ip6gre_init_net(struct net *net)
+{
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int err;
+
+       ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+                                          ip6gre_tunnel_setup);
+       if (!ign->fb_tunnel_dev) {
+               err = -ENOMEM;
+               goto err_alloc_dev;
+       }
+       dev_net_set(ign->fb_tunnel_dev, net);
+
+       ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
+       ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
+
+       err = register_netdev(ign->fb_tunnel_dev);
+       if (err)
+               goto err_reg_dev;
+
+       rcu_assign_pointer(ign->tunnels_wc[0],
+                          netdev_priv(ign->fb_tunnel_dev));
+       return 0;
+
+err_reg_dev:
+       ip6gre_dev_free(ign->fb_tunnel_dev);
+err_alloc_dev:
+       return err;
+}
+
+static void __net_exit ip6gre_exit_net(struct net *net)
+{
+       struct ip6gre_net *ign;
+       LIST_HEAD(list);
+
+       ign = net_generic(net, ip6gre_net_id);
+       rtnl_lock();
+       ip6gre_destroy_tunnels(ign, &list);
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
+static struct pernet_operations ip6gre_net_ops = {
+       .init = ip6gre_init_net,
+       .exit = ip6gre_exit_net,
+       .id   = &ip6gre_net_id,
+       .size = sizeof(struct ip6gre_net),
+};
+
+static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       __be16 flags;
+
+       if (!data)
+               return 0;
+
+       flags = 0;
+       if (data[IFLA_GRE_IFLAGS])
+               flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+       if (data[IFLA_GRE_OFLAGS])
+               flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+       if (flags & (GRE_VERSION|GRE_ROUTING))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       struct in6_addr daddr;
+
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+
+       if (!data)
+               goto out;
+
+       if (data[IFLA_GRE_REMOTE]) {
+               nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+               if (ipv6_addr_any(&daddr))
+                       return -EINVAL;
+       }
+
+out:
+       return ip6gre_tunnel_validate(tb, data);
+}
+
+
+static void ip6gre_netlink_parms(struct nlattr *data[],
+                               struct __ip6_tnl_parm *parms)
+{
+       memset(parms, 0, sizeof(*parms));
+
+       if (!data)
+               return;
+
+       if (data[IFLA_GRE_LINK])
+               parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
+
+       if (data[IFLA_GRE_IFLAGS])
+               parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
+
+       if (data[IFLA_GRE_OFLAGS])
+               parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
+
+       if (data[IFLA_GRE_IKEY])
+               parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
+
+       if (data[IFLA_GRE_OKEY])
+               parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
+
+       if (data[IFLA_GRE_LOCAL])
+               nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
+
+       if (data[IFLA_GRE_REMOTE])
+               nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+
+       if (data[IFLA_GRE_TTL])
+               parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
+
+       if (data[IFLA_GRE_ENCAP_LIMIT])
+               parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
+
+       if (data[IFLA_GRE_FLOWINFO])
+               parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
+
+       if (data[IFLA_GRE_FLAGS])
+               parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
+}
+
+static int ip6gre_tap_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel;
+
+       tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       ip6gre_tnl_link_config(tunnel, 1);
+
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static const struct net_device_ops ip6gre_tap_netdev_ops = {
+       .ndo_init = ip6gre_tap_init,
+       .ndo_uninit = ip6gre_tunnel_uninit,
+       .ndo_start_xmit = ip6gre_tunnel_xmit,
+       .ndo_set_mac_address = eth_mac_addr,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_change_mtu = ip6gre_tunnel_change_mtu,
+       .ndo_get_stats64 = ip6gre_get_stats64,
+};
+
+static void ip6gre_tap_setup(struct net_device *dev)
+{
+
+       ether_setup(dev);
+
+       dev->netdev_ops = &ip6gre_tap_netdev_ops;
+       dev->destructor = ip6gre_dev_free;
+
+       dev->iflink = 0;
+       dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+       struct nlattr *tb[], struct nlattr *data[])
+{
+       struct ip6_tnl *nt;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int err;
+
+       nt = netdev_priv(dev);
+       ip6gre_netlink_parms(data, &nt->parms);
+
+       if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+               return -EEXIST;
+
+       if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
+               eth_hw_addr_random(dev);
+
+       nt->dev = dev;
+       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
+       err = register_netdevice(dev);
+       if (err)
+               goto out;
+
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+
+out:
+       return err;
+}
+
+static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+                           struct nlattr *data[])
+{
+       struct ip6_tnl *t, *nt;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct __ip6_tnl_parm p;
+
+       if (dev == ign->fb_tunnel_dev)
+               return -EINVAL;
+
+       nt = netdev_priv(dev);
+       ip6gre_netlink_parms(data, &p);
+
+       t = ip6gre_tunnel_locate(net, &p, 0);
+
+       if (t) {
+               if (t->dev != dev)
+                       return -EEXIST;
+       } else {
+               t = nt;
+
+               ip6gre_tunnel_unlink(ign, t);
+               ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+               ip6gre_tunnel_link(ign, t);
+               netdev_state_change(dev);
+       }
+
+       return 0;
+}
+
+static size_t ip6gre_get_size(const struct net_device *dev)
+{
+       return
+               /* IFLA_GRE_LINK */
+               nla_total_size(4) +
+               /* IFLA_GRE_IFLAGS */
+               nla_total_size(2) +
+               /* IFLA_GRE_OFLAGS */
+               nla_total_size(2) +
+               /* IFLA_GRE_IKEY */
+               nla_total_size(4) +
+               /* IFLA_GRE_OKEY */
+               nla_total_size(4) +
+               /* IFLA_GRE_LOCAL */
+               nla_total_size(4) +
+               /* IFLA_GRE_REMOTE */
+               nla_total_size(4) +
+               /* IFLA_GRE_TTL */
+               nla_total_size(1) +
+               /* IFLA_GRE_TOS */
+               nla_total_size(1) +
+               /* IFLA_GRE_ENCAP_LIMIT */
+               nla_total_size(1) +
+               /* IFLA_GRE_FLOWINFO */
+               nla_total_size(4) +
+               /* IFLA_GRE_FLAGS */
+               nla_total_size(4) +
+               0;
+}
+
+static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct __ip6_tnl_parm *p = &t->parms;
+
+       if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+           nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+           nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+           nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+           nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
+           nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
+           nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
+           /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
+           nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
+           nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
+           nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+       [IFLA_GRE_LINK]        = { .type = NLA_U32 },
+       [IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
+       [IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
+       [IFLA_GRE_IKEY]        = { .type = NLA_U32 },
+       [IFLA_GRE_OKEY]        = { .type = NLA_U32 },
+       [IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
+       [IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
+       [IFLA_GRE_TTL]         = { .type = NLA_U8 },
+       [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
+       [IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
+       [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+       .kind           = "ip6gre",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = ip6gre_tunnel_setup,
+       .validate       = ip6gre_tunnel_validate,
+       .newlink        = ip6gre_newlink,
+       .changelink     = ip6gre_changelink,
+       .get_size       = ip6gre_get_size,
+       .fill_info      = ip6gre_fill_info,
+};
+
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+       .kind           = "ip6gretap",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = ip6gre_tap_setup,
+       .validate       = ip6gre_tap_validate,
+       .newlink        = ip6gre_newlink,
+       .changelink     = ip6gre_changelink,
+       .get_size       = ip6gre_get_size,
+       .fill_info      = ip6gre_fill_info,
+};
+
+/*
+ *     And now the modules code and kernel interface.
+ */
+
+static int __init ip6gre_init(void)
+{
+       int err;
+
+       pr_info("GRE over IPv6 tunneling driver\n");
+
+       err = register_pernet_device(&ip6gre_net_ops);
+       if (err < 0)
+               return err;
+
+       err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
+       if (err < 0) {
+               pr_info("%s: can't add protocol\n", __func__);
+               goto add_proto_failed;
+       }
+
+       err = rtnl_link_register(&ip6gre_link_ops);
+       if (err < 0)
+               goto rtnl_link_failed;
+
+       err = rtnl_link_register(&ip6gre_tap_ops);
+       if (err < 0)
+               goto tap_ops_failed;
+
+out:
+       return err;
+
+tap_ops_failed:
+       rtnl_link_unregister(&ip6gre_link_ops);
+rtnl_link_failed:
+       inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+add_proto_failed:
+       unregister_pernet_device(&ip6gre_net_ops);
+       goto out;
+}
+
+static void __exit ip6gre_fini(void)
+{
+       rtnl_link_unregister(&ip6gre_tap_ops);
+       rtnl_link_unregister(&ip6gre_link_ops);
+       inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+       unregister_pernet_device(&ip6gre_net_ops);
+}
+
+module_init(ip6gre_init);
+module_exit(ip6gre_fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
+MODULE_ALIAS_RTNL_LINK("ip6gre");
+MODULE_ALIAS_NETDEV("ip6gre0");
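The dev->mtu arithmetic in ip6gre_tunnel_setup() above is easy to sanity-check by hand: a 1500-byte Ethernet payload loses 40 bytes of IPv6 header, 4 bytes of base GRE header (flags + protocol), and a further 8 bytes when a tunnel-encapsulation-limit option is sent; keys, checksums and sequence numbers, accounted for in ip6gre_tnl_link_config(), each cost 4 more. The standalone sketch below is not part of the patch; its constants simply mirror the kernel values and it only works the default MTUs out (it prints 1456 and 1448).

    #include <stdio.h>

    /* Illustrative only: these mirror the constants used by
     * ip6gre_tunnel_setup(); they are not taken from kernel headers. */
    #define ETH_DATA_LEN      1500
    #define IPV6_HDR_LEN      40   /* sizeof(struct ipv6hdr) */
    #define GRE_BASE_HDR_LEN  4    /* flags + protocol */
    #define ENCAP_LIM_OPT_LEN 8    /* destination option carrying the encap limit */

    int main(void)
    {
            int mtu = ETH_DATA_LEN - IPV6_HDR_LEN - GRE_BASE_HDR_LEN;

            printf("default ip6gre mtu (encap limit ignored): %d\n", mtu);
            printf("default ip6gre mtu (encap limit sent):    %d\n",
                   mtu - ENCAP_LIM_OPT_LEN);
            return 0;
    }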
index 9a1d5fe..cb7e2de 100644
@@ -126,7 +126,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
        struct dst_entry *dst = t->dst_cache;
 
@@ -139,20 +139,23 @@ static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 
        return dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
 
-static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
+void ip6_tnl_dst_reset(struct ip6_tnl *t)
 {
        dst_release(t->dst_cache);
        t->dst_cache = NULL;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
-static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 {
        struct rt6_info *rt = (struct rt6_info *) dst;
        t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
        dst_release(t->dst_cache);
        t->dst_cache = dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -200,7 +203,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
  **/
 
 static struct ip6_tnl __rcu **
-ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
+ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
 {
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
@@ -267,7 +270,7 @@ static void ip6_dev_free(struct net_device *dev)
  *   created tunnel or NULL
  **/
 
-static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
+static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 {
        struct net_device *dev;
        struct ip6_tnl *t;
@@ -322,7 +325,7 @@ failed:
  **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
-               struct ip6_tnl_parm *p, int create)
+               struct __ip6_tnl_parm *p, int create)
 {
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
@@ -374,8 +377,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
  *   else index to encapsulation limit
  **/
 
-static __u16
-parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
        const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
        __u8 nexthdr = ipv6h->nexthdr;
@@ -425,6 +427,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
        }
        return 0;
 }
+EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
 
 /**
  * ip6_tnl_err - tunnel error handler
@@ -480,7 +483,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
        case ICMPV6_PARAMPROB:
                teli = 0;
                if ((*code) == ICMPV6_HDR_FIELD)
-                       teli = parse_tlv_tnl_enc_lim(skb, skb->data);
+                       teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
                if (teli && teli == *info - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -693,11 +696,11 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
                IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
-static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
                             const struct in6_addr *laddr,
                             const struct in6_addr *raddr)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ltype = ipv6_addr_type(laddr);
        int rtype = ipv6_addr_type(raddr);
        __u32 flags = 0;
@@ -715,13 +718,14 @@ static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
        }
        return flags;
 }
+EXPORT_SYMBOL(ip6_tnl_get_cap);
 
 /* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
                                  const struct in6_addr *laddr,
                                  const struct in6_addr *raddr)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
        struct net *net = dev_net(t->dev);
 
@@ -740,6 +744,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
 /**
  * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -859,9 +864,9 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
-static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
        struct net *net = dev_net(t->dev);
 
@@ -885,6 +890,8 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
+
 /**
  * ip6_tnl_xmit2 - encapsulate packet and send
  *   @skb: the outgoing socket buffer
@@ -1085,7 +1092,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
            !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
 
-       offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
+       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -1152,7 +1159,7 @@ tx_err:
 static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1215,7 +1222,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
  **/
 
 static int
-ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
+ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 {
        t->parms.laddr = p->laddr;
        t->parms.raddr = p->raddr;
@@ -1230,6 +1237,34 @@ ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
        return 0;
 }
 
+static void
+ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->flags = u->flags;
+       p->hop_limit = u->hop_limit;
+       p->encap_limit = u->encap_limit;
+       p->flowinfo = u->flowinfo;
+       p->link = u->link;
+       p->proto = u->proto;
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
+{
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->flags = p->flags;
+       u->hop_limit = p->hop_limit;
+       u->encap_limit = p->encap_limit;
+       u->flowinfo = p->flowinfo;
+       u->link = p->link;
+       u->proto = p->proto;
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
 /**
  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
  *   @dev: virtual device associated with tunnel
@@ -1263,6 +1298,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        int err = 0;
        struct ip6_tnl_parm p;
+       struct __ip6_tnl_parm p1;
        struct ip6_tnl *t = NULL;
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1274,11 +1310,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                err = -EFAULT;
                                break;
                        }
-                       t = ip6_tnl_locate(net, &p, 0);
+                       ip6_tnl_parm_from_user(&p1, &p);
+                       t = ip6_tnl_locate(net, &p1, 0);
+               } else {
+                       memset(&p, 0, sizeof(p));
                }
                if (t == NULL)
                        t = netdev_priv(dev);
-               memcpy(&p, &t->parms, sizeof (p));
+               ip6_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
                        err = -EFAULT;
                }
@@ -1295,7 +1334,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
                    p.proto != 0)
                        break;
-               t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
+               ip6_tnl_parm_from_user(&p1, &p);
+               t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
                if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
@@ -1307,13 +1347,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
                        ip6_tnl_unlink(ip6n, t);
                        synchronize_net();
-                       err = ip6_tnl_change(t, &p);
+                       err = ip6_tnl_change(t, &p1);
                        ip6_tnl_link(ip6n, t);
                        netdev_state_change(dev);
                }
                if (t) {
                        err = 0;
-                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
+                       ip6_tnl_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
 
                } else
@@ -1329,7 +1370,9 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                                break;
                        err = -ENOENT;
-                       if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
+                       ip6_tnl_parm_from_user(&p1, &p);
+                       t = ip6_tnl_locate(net, &p1, 0);
+                       if (t == NULL)
                                break;
                        err = -EPERM;
                        if (t->dev == ip6n->fb_tnl_dev)
index 325e59a..beb5777 100644
@@ -61,9 +61,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
        net->ipv6.ip6table_filter =
                ip6t_register_table(net, &packet_filter, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_filter))
-               return PTR_ERR(net->ipv6.ip6table_filter);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_filter);
 }
 
 static void __net_exit ip6table_filter_net_exit(struct net *net)
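This and the next three ip6table_* hunks are the same one-line cleanup: PTR_RET() collapses the IS_ERR()/PTR_ERR() check into "return the encoded errno if the table pointer is an error, 0 otherwise" (later kernels spell this PTR_ERR_OR_ZERO()). The userspace model below is purely to illustrate the semantics -- the real macros live in linux/err.h and encode errnos in the top 4095 pointer values; this is a simplified stand-in, not the kernel implementation.

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long err)      { return (void *)err; }
    static int   is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
    static long  ptr_err(const void *p) { return (long)p; }

    /* Equivalent of PTR_RET()/PTR_ERR_OR_ZERO(): 0 on success, -errno on error. */
    static int ptr_ret(const void *p)   { return is_err(p) ? (int)ptr_err(p) : 0; }

    int main(void)
    {
            int x;

            printf("valid pointer -> %d\n", ptr_ret(&x));            /* 0 */
            printf("ERR_PTR(-12)  -> %d\n", ptr_ret(err_ptr(-12)));  /* -12, i.e. -ENOMEM */
            return 0;
    }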
index 4d78240..7431121 100644
@@ -97,9 +97,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
        net->ipv6.ip6table_mangle =
                ip6t_register_table(net, &packet_mangler, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_mangle))
-               return PTR_ERR(net->ipv6.ip6table_mangle);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_mangle);
 }
 
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
index 5b9926a..60d1bdd 100644
@@ -40,9 +40,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
        net->ipv6.ip6table_raw =
                ip6t_register_table(net, &packet_raw, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_raw))
-               return PTR_ERR(net->ipv6.ip6table_raw);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_raw);
 }
 
 static void __net_exit ip6table_raw_net_exit(struct net *net)
index 91aa2b4..db15535 100644
@@ -58,10 +58,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
        net->ipv6.ip6table_security =
                ip6t_register_table(net, &security_table, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_security))
-               return PTR_ERR(net->ipv6.ip6table_security);
-
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_security);
 }
 
 static void __net_exit ip6table_security_net_exit(struct net *net)
index ef0579d..7af88ef 100644
@@ -1251,7 +1251,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
-                  sock_i_uid(sp), 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
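The seq-file hunks here and in the tcp/udp/ipx/key/llc files below all make the same user-namespace conversion: sock_i_uid() now returns a kernel-internal kuid_t, and from_kuid_munged(seq_user_ns(seq), ...) translates it into whatever uid is meaningful in the reader's namespace, falling back to the overflow uid when no mapping exists. The sketch below is a toy model of that munging idea with invented types, not the kernel API, and only illustrates the fallback behaviour.

    #include <stdio.h>

    #define OVERFLOWUID 65534

    /* Toy model: a namespace maps a contiguous range of global uids onto
     * namespace-local uids; anything unmapped is reported as the overflow
     * uid, which is what the "munged" variant guarantees. */
    struct userns_map {
            unsigned int lower_first;  /* first namespace-local uid */
            unsigned int upper_first;  /* first global uid it maps to */
            unsigned int count;
    };

    static unsigned int from_kuid_munged_toy(const struct userns_map *ns,
                                             unsigned int global_uid)
    {
            if (global_uid >= ns->upper_first &&
                global_uid < ns->upper_first + ns->count)
                    return ns->lower_first + (global_uid - ns->upper_first);
            return OVERFLOWUID;  /* no mapping: never report a wrong uid */
    }

    int main(void)
    {
            struct userns_map container = {
                    .lower_first = 0, .upper_first = 100000, .count = 65536,
            };

            printf("%u\n", from_kuid_munged_toy(&container, 100000)); /* 0 (root inside) */
            printf("%u\n", from_kuid_munged_toy(&container, 100999)); /* 999 */
            printf("%u\n", from_kuid_munged_toy(&container, 0));      /* 65534 (unmapped) */
            return 0;
    }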
index 8e80fd2..0ddf2d1 100644
@@ -965,7 +965,7 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
 {
        int flags = 0;
 
-       fl6->flowi6_iif = net->loopback_dev->ifindex;
+       fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
                flags |= RT6_LOOKUP_F_IFACE;
index a3e60cc..f99b81d 100644
@@ -1828,7 +1828,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-                        const struct sock *sk, struct request_sock *req, int i, int uid)
+                        const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
 {
        int ttd = req->expires - jiffies;
        const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -1852,7 +1852,7 @@ static void get_openreq6(struct seq_file *seq,
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->retrans,
-                  uid,
+                  from_kuid_munged(seq_user_ns(seq), uid),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
@@ -1900,9 +1900,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   tp->write_seq-tp->snd_una,
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
-                  jiffies_to_clock_t(timer_expires - jiffies),
+                  jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
-                  sock_i_uid(sp),
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
@@ -1920,10 +1920,7 @@ static void get_timewait6_sock(struct seq_file *seq,
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-       int ttd = tw->tw_ttd - jiffies;
-
-       if (ttd < 0)
-               ttd = 0;
+       long delta = tw->tw_ttd - jiffies;
 
        dest = &tw6->tw_v6_daddr;
        src  = &tw6->tw_v6_rcv_saddr;
@@ -1939,7 +1936,7 @@ static void get_timewait6_sock(struct seq_file *seq,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
-                  3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+                  3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
 }
 
index 99d0077..bbdff07 100644
@@ -1458,7 +1458,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
-                  sock_i_uid(sp), 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   atomic_read(&sp->sk_drops));
index f8ba30d..02ff7f2 100644
@@ -217,7 +217,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
        seq_printf(seq, "%08X  %08X  %02X     %03d\n",
                   sk_wmem_alloc_get(s),
                   sk_rmem_alloc_get(s),
-                  s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
+                  s->sk_state,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
 out:
        return 0;
 }
index 34e4185..334f93b 100644
@@ -3024,7 +3024,7 @@ static u32 get_acqseq(void)
        return res;
 }
 
-static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp, int dir)
+static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -3105,7 +3105,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
        pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
        pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
        pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
-       pol->sadb_x_policy_dir = dir+1;
+       pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
        pol->sadb_x_policy_id = xp->index;
 
        /* Set sadb_comb's. */
@@ -3661,7 +3661,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
                               atomic_read(&s->sk_refcnt),
                               sk_rmem_alloc_get(s),
                               sk_wmem_alloc_get(s),
-                              sock_i_uid(s),
+                              from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
                               sock_i_ino(s)
                               );
        return 0;
index a1839c0..7b4799c 100644
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
                   sk_wmem_alloc_get(sk),
                   sk_rmem_alloc_get(sk) - llc->copied_seq,
                   sk->sk_state,
-                  sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                   llc->link);
 out:
        return 0;
index 8dfd70d..a04752e 100644
@@ -38,14 +38,10 @@ static void gf_mulx(u8 *pad)
 static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
                                const u8 *addr[], const size_t *len, u8 *mac)
 {
-       u8 scratch[2 * AES_BLOCK_SIZE];
-       u8 *cbc, *pad;
+       u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
        const u8 *pos, *end;
        size_t i, e, left, total_len;
 
-       cbc = scratch;
-       pad = scratch + AES_BLOCK_SIZE;
-
        memset(cbc, 0, AES_BLOCK_SIZE);
 
        total_len = 0;
index d41974a..929f897 100644
@@ -102,6 +102,18 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_start_p2p_device(struct wiphy *wiphy,
+                                     struct wireless_dev *wdev)
+{
+       return ieee80211_do_open(wdev, true);
+}
+
+static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
+                                     struct wireless_dev *wdev)
+{
+       ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
+}
+
 static int ieee80211_set_noack_map(struct wiphy *wiphy,
                                  struct net_device *dev,
                                  u16 noack_map)
@@ -330,7 +342,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
        if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
                struct ieee80211_supported_band *sband;
                sband = sta->local->hw.wiphy->bands[
-                               sta->local->hw.conf.channel->band];
+                               sta->local->oper_channel->band];
                rate->legacy = sband->bitrates[idx].bitrate;
        } else
                rate->mcs = idx;
@@ -725,25 +737,23 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
                                    const u8 *resp, size_t resp_len)
 {
-       struct sk_buff *new, *old;
+       struct probe_resp *new, *old;
 
        if (!resp || !resp_len)
                return 1;
 
        old = rtnl_dereference(sdata->u.ap.probe_resp);
 
-       new = dev_alloc_skb(resp_len);
+       new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
-       memcpy(skb_put(new, resp_len), resp, resp_len);
+       new->len = resp_len;
+       memcpy(new->data, resp, resp_len);
 
        rcu_assign_pointer(sdata->u.ap.probe_resp, new);
-       if (old) {
-               /* TODO: use call_rcu() */
-               synchronize_rcu();
-               dev_kfree_skb(old);
-       }
+       if (old)
+               kfree_rcu(old, rcu_head);
 
        return 0;
 }
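The probe_resp rework just above replaces an RCU-protected sk_buff with a small header-plus-payload struct carved out of a single allocation, so the old copy can be released with kfree_rcu() instead of an explicit synchronize_rcu(). The allocation pattern itself is ordinary C; below is a minimal userspace sketch of the same flexible-array layout, with malloc/calloc standing in for kzalloc and no RCU involved.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mirror of the struct probe_resp layout: header plus trailing data,
     * carved out of one allocation so a single free()/kfree_rcu() releases
     * both.  Userspace sketch only. */
    struct blob {
            int len;
            unsigned char data[];   /* C99 flexible array member ([0] in the kernel) */
    };

    static struct blob *blob_new(const void *src, int len)
    {
            struct blob *b = calloc(1, sizeof(*b) + len);

            if (!b)
                    return NULL;
            b->len = len;
            memcpy(b->data, src, len);
            return b;
    }

    int main(void)
    {
            const char resp[] = "probe response template";
            struct blob *b = blob_new(resp, sizeof(resp));

            if (!b)
                    return 1;
            printf("%d bytes: %s\n", b->len, (char *)b->data);
            free(b);   /* one call frees header and payload together */
            return 0;
    }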
@@ -950,7 +960,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
        /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
         * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
 
-       memset(msg->da, 0xff, ETH_ALEN);
+       eth_broadcast_addr(msg->da);
        memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
        msg->len = htons(6);
        msg->dsap = 0;
@@ -1285,9 +1295,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
        mutex_unlock(&local->sta_mtx);
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
                ieee80211_recalc_ps(local, -1);
-
+               ieee80211_recalc_ps_vif(sdata);
+       }
        return 0;
 }
 
@@ -1661,7 +1672,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
        }
 
        if (!sdata->vif.bss_conf.use_short_slot &&
-           sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+           sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
                sdata->vif.bss_conf.use_short_slot = true;
                changed |= BSS_CHANGED_ERP_SLOT;
        }
@@ -1775,6 +1786,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->local->ops->hw_scan)
@@ -1927,7 +1939,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                                  enum nl80211_tx_power_setting type, int mbm)
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
-       struct ieee80211_channel *chan = local->hw.conf.channel;
+       struct ieee80211_channel *chan = local->oper_channel;
        u32 changes = 0;
 
        switch (type) {
@@ -2079,6 +2091,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
        ieee80211_recalc_ps(local, -1);
+       ieee80211_recalc_ps_vif(sdata);
 
        return 0;
 }
@@ -2461,6 +2474,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                if (!sdata->u.mgd.associated)
                        need_offchan = true;
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               need_offchan = true;
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -2653,6 +2669,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                               u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_tdls_data *tf;
 
        tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2672,8 +2689,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                tf->u.setup_req.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        case WLAN_TDLS_SETUP_RESPONSE:
@@ -2686,8 +2705,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                tf->u.setup_resp.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        case WLAN_TDLS_SETUP_CONFIRM:
@@ -2725,6 +2746,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
                           u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_mgmt *mgmt;
 
        mgmt = (void *)skb_put(skb, 24);
@@ -2747,8 +2769,10 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
                mgmt->u.action.u.tdls_discover_resp.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        default:
@@ -3005,6 +3029,8 @@ struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
        .change_virtual_intf = ieee80211_change_iface,
+       .start_p2p_device = ieee80211_start_p2p_device,
+       .stop_p2p_device = ieee80211_stop_p2p_device,
        .add_key = ieee80211_add_key,
        .del_key = ieee80211_del_key,
        .get_key = ieee80211_get_key,
index b8dfb44..97173f8 100644
@@ -63,8 +63,6 @@ DEBUGFS_READONLY_FILE(user_power, "%d",
                      local->user_power_level);
 DEBUGFS_READONLY_FILE(power, "%d",
                      local->hw.conf.power_level);
-DEBUGFS_READONLY_FILE(frequency, "%d",
-                     local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
                      local->total_ps_buffered);
 DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -91,33 +89,6 @@ static const struct file_operations reset_ops = {
        .llseek = noop_llseek,
 };
 
-static ssize_t channel_type_read(struct file *file, char __user *user_buf,
-                      size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       const char *buf;
-
-       switch (local->hw.conf.channel_type) {
-       case NL80211_CHAN_NO_HT:
-               buf = "no ht\n";
-               break;
-       case NL80211_CHAN_HT20:
-               buf = "ht20\n";
-               break;
-       case NL80211_CHAN_HT40MINUS:
-               buf = "ht40-\n";
-               break;
-       case NL80211_CHAN_HT40PLUS:
-               buf = "ht40+\n";
-               break;
-       default:
-               buf = "???";
-               break;
-       }
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
-
 static ssize_t hwflags_read(struct file *file, char __user *user_buf,
                            size_t count, loff_t *ppos)
 {
@@ -205,7 +176,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
 }
 
 DEBUGFS_READONLY_FILE_OPS(hwflags);
-DEBUGFS_READONLY_FILE_OPS(channel_type);
 DEBUGFS_READONLY_FILE_OPS(queues);
 
 /* statistics stuff */
@@ -272,12 +242,10 @@ void debugfs_hw_add(struct ieee80211_local *local)
 
        local->debugfs.keys = debugfs_create_dir("keys", phyd);
 
-       DEBUGFS_ADD(frequency);
        DEBUGFS_ADD(total_ps_buffered);
        DEBUGFS_ADD(wep_iv);
        DEBUGFS_ADD(queues);
        DEBUGFS_ADD_MODE(reset, 0200);
-       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(hwflags);
        DEBUGFS_ADD(user_power);
        DEBUGFS_ADD(power);
index df92031..da9003b 100644
@@ -9,7 +9,7 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
        WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
             "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
-            sdata->dev->name, sdata->flags);
+            sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
        return sdata;
 }
 
-static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local,
+                         struct ieee80211_tx_control *control,
+                         struct sk_buff *skb)
 {
-       local->ops->tx(&local->hw, skb);
+       local->ops->tx(&local->hw, control, skb);
 }
 
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
@@ -526,6 +528,9 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
        sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
+       WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+               sdata->vif.type != NL80211_IFTYPE_ADHOC);
+
        trace_drv_sta_rc_update(local, sdata, sta, changed);
        if (local->ops->sta_rc_update)
                local->ops->sta_rc_update(&local->hw, &sdata->vif,
index 5746d62..a9d9328 100644
@@ -109,7 +109,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_PROBE_RESP);
-       memset(mgmt->da, 0xff, ETH_ALEN);
+       eth_broadcast_addr(mgmt->da);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
        mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        mod_timer(&ifibss->timer,
                  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-       bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+       bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
                                        mgmt, skb->len, 0, GFP_KERNEL);
        cfg80211_put_bss(bss);
        netif_carrier_on(sdata->dev);
@@ -294,7 +294,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
-       int band = local->hw.conf.channel->band;
+       int band = local->oper_channel->band;
 
        /*
         * XXX: Consider removing the least recently used entry and
@@ -459,8 +459,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                        }
                }
 
-               if (sta && rates_updated)
+               if (sta && rates_updated) {
+                       drv_sta_rc_update(local, sdata, &sta->sta,
+                                         IEEE80211_RC_SUPP_RATES_CHANGED);
                        rate_control_rate_init(sta);
+               }
 
                rcu_read_unlock();
        }
@@ -561,7 +564,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
-       int band = local->hw.conf.channel->band;
+       int band = local->oper_channel->band;
 
        /*
         * XXX: Consider removing the least recently used entry and
@@ -759,7 +762,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                                return;
                        }
                        sdata_info(sdata, "IBSS not allowed on %d MHz\n",
-                                  local->hw.conf.channel->center_freq);
+                                  local->oper_channel->center_freq);
 
                        /* No IBSS found - decrease scan interval and continue
                         * scanning. */
index bb61f77..204bfed 100644
@@ -193,8 +193,6 @@ struct ieee80211_tx_data {
        struct sta_info *sta;
        struct ieee80211_key *key;
 
-       struct ieee80211_channel *channel;
-
        unsigned int flags;
 };
 
@@ -274,9 +272,15 @@ struct beacon_data {
        struct rcu_head rcu_head;
 };
 
+struct probe_resp {
+       struct rcu_head rcu_head;
+       int len;
+       u8 data[0];
+};
+
 struct ieee80211_if_ap {
        struct beacon_data __rcu *beacon;
-       struct sk_buff __rcu *probe_resp;
+       struct probe_resp __rcu *probe_resp;
 
        struct list_head vlans;
 
@@ -359,6 +363,7 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_NULLFUNC_ACKED    = BIT(8),
        IEEE80211_STA_RESET_SIGNAL_AVE  = BIT(9),
        IEEE80211_STA_DISABLE_40MHZ     = BIT(10),
+       IEEE80211_STA_DISABLE_VHT       = BIT(11),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -1075,6 +1080,8 @@ struct ieee80211_local {
        struct idr ack_status_frames;
        spinlock_t ack_status_lock;
 
+       struct ieee80211_sub_if_data __rcu *p2p_sdata;
+
        /* dummy netdev for use w/ NAPI */
        struct net_device napi_dev;
 
@@ -1131,7 +1138,7 @@ struct ieee802_11_elems {
        u8 *prep;
        u8 *perr;
        struct ieee80211_rann_ie *rann;
-       u8 *ch_switch_elem;
+       struct ieee80211_channel_sw_ie *ch_switch_ie;
        u8 *country_elem;
        u8 *pwr_constr_elem;
        u8 *quiet_elem; /* first quite element */
@@ -1157,7 +1164,6 @@ struct ieee802_11_elems {
        u8 preq_len;
        u8 prep_len;
        u8 perr_len;
-       u8 ch_switch_elem_len;
        u8 country_elem_len;
        u8 pwr_constr_elem_len;
        u8 quiet_elem_len;
@@ -1202,6 +1208,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_pspoll(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
 int ieee80211_max_network_latency(struct notifier_block *nb,
                                  unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1298,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
 void ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
                                    const int offset);
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1425,7 +1434,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr, bool ack);
-void ieee80211_beacon_connection_loss_work(struct work_struct *work);
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
                                     enum queue_stop_reason reason);
@@ -1457,13 +1465,15 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             u8 channel);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
+                                         struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed);
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck);
+                             u32 ratemask, bool directed, bool no_cck,
+                             struct ieee80211_channel *channel);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
                                  const size_t supp_rates_len,
@@ -1487,9 +1497,11 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-                           struct sk_buff *skb, bool need_basic);
+                           struct sk_buff *skb, bool need_basic,
+                           enum ieee80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-                               struct sk_buff *skb, bool need_basic);
+                               struct sk_buff *skb, bool need_basic,
+                               enum ieee80211_band band);
 
 /* channel management */
 enum ieee80211_chan_mode {
index bfb57dc..59f8adc 100644
@@ -100,6 +100,10 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
                        sdata->vif.bss_conf.idle = true;
                        continue;
                }
+
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                /* count everything else */
                sdata->vif.bss_conf.idle = false;
                count++;
@@ -121,7 +125,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+                   sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
                        continue;
                if (sdata->old_idle == sdata->vif.bss_conf.idle)
                        continue;
@@ -204,6 +209,8 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
 {
        return type1 == NL80211_IFTYPE_MONITOR ||
                type2 == NL80211_IFTYPE_MONITOR ||
+               type1 == NL80211_IFTYPE_P2P_DEVICE ||
+               type2 == NL80211_IFTYPE_P2P_DEVICE ||
                (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
                (type1 == NL80211_IFTYPE_WDS &&
                        (type2 == NL80211_IFTYPE_WDS ||
@@ -406,9 +413,10 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
  * an error on interface type changes that have been pre-checked, so most
  * checks should be in ieee80211_check_concurrent_iface.
  */
-static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+       struct net_device *dev = wdev->netdev;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
@@ -443,6 +451,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_P2P_DEVICE:
                /* no special treatment */
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +480,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
         * Copy the hopefully now-present MAC address to
         * this interface, if it has the special null one.
         */
-       if (is_zero_ether_addr(dev->dev_addr)) {
+       if (dev && is_zero_ether_addr(dev->dev_addr)) {
                memcpy(dev->dev_addr,
                       local->hw.wiphy->perm_addr,
                       ETH_ALEN);
@@ -536,15 +545,23 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                        local->fif_probe_req++;
                }
 
-               changed |= ieee80211_reset_erp_info(sdata);
+               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+                       changed |= ieee80211_reset_erp_info(sdata);
                ieee80211_bss_info_change_notify(sdata, changed);
 
-               if (sdata->vif.type == NL80211_IFTYPE_STATION ||
-                   sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-                   sdata->vif.type == NL80211_IFTYPE_AP)
+               switch (sdata->vif.type) {
+               case NL80211_IFTYPE_STATION:
+               case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_AP:
+               case NL80211_IFTYPE_MESH_POINT:
                        netif_carrier_off(dev);
-               else
+                       break;
+               case NL80211_IFTYPE_WDS:
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       break;
+               default:
                        netif_carrier_on(dev);
+               }
 
                /*
                 * set default queue parameters so drivers don't
@@ -576,6 +593,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                }
 
                rate_control_rate_init(sta);
+               netif_carrier_on(dev);
+       } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+               rcu_assign_pointer(local->p2p_sdata, sdata);
        }
 
        /*
@@ -601,7 +621,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 
        ieee80211_recalc_ps(local, -1);
 
-       netif_tx_start_all_queues(dev);
+       if (dev)
+               netif_tx_start_all_queues(dev);
 
        return 0;
  err_del_interface:
@@ -631,7 +652,7 @@ static int ieee80211_open(struct net_device *dev)
        if (err)
                return err;
 
-       return ieee80211_do_open(dev, true);
+       return ieee80211_do_open(&sdata->wdev, true);
 }
 
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +673,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        /*
         * Stop TX on this interface first.
         */
-       netif_tx_stop_all_queues(sdata->dev);
+       if (sdata->dev)
+               netif_tx_stop_all_queues(sdata->dev);
 
        ieee80211_roc_purge(sdata);
 
@@ -691,14 +713,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                local->fif_probe_req--;
        }
 
-       netif_addr_lock_bh(sdata->dev);
-       spin_lock_bh(&local->filter_lock);
-       __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
-                        sdata->dev->addr_len);
-       spin_unlock_bh(&local->filter_lock);
-       netif_addr_unlock_bh(sdata->dev);
+       if (sdata->dev) {
+               netif_addr_lock_bh(sdata->dev);
+               spin_lock_bh(&local->filter_lock);
+               __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+                                sdata->dev->addr_len);
+               spin_unlock_bh(&local->filter_lock);
+               netif_addr_unlock_bh(sdata->dev);
 
-       ieee80211_configure_filter(local);
+               ieee80211_configure_filter(local);
+       }
 
        del_timer_sync(&local->dynamic_ps_timer);
        cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +732,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_sub_if_data *vlan, *tmpsdata;
                struct beacon_data *old_beacon =
                        rtnl_dereference(sdata->u.ap.beacon);
-               struct sk_buff *old_probe_resp =
+               struct probe_resp *old_probe_resp =
                        rtnl_dereference(sdata->u.ap.probe_resp);
 
                /* sdata_running will return false, so this will disable */
@@ -720,7 +744,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
                synchronize_rcu();
                kfree(old_beacon);
-               kfree_skb(old_probe_resp);
+               kfree(old_probe_resp);
 
                /* down all dependent devices, that is VLANs */
                list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,6 +783,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_adjust_monitor_flags(sdata, -1);
                ieee80211_configure_filter(local);
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               /* relies on synchronize_rcu() below */
+               rcu_assign_pointer(local->p2p_sdata, NULL);
+               /* fall through */
        default:
                flush_work(&sdata->work);
                /*
@@ -771,14 +799,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                skb_queue_purge(&sdata->skb_queue);
 
                /*
-                * Disable beaconing here for mesh only, AP and IBSS
-                * are already taken care of.
-                */
-               if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-                       ieee80211_bss_info_change_notify(sdata,
-                               BSS_CHANGED_BEACON_ENABLED);
-
-               /*
                 * Free all remaining keys, there shouldn't be any,
                 * except maybe group keys in AP mode or WDS?
                 */
@@ -877,9 +897,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
  * Called when the netdev is removed or, by the code below, before
  * the interface type changes.
  */
-static void ieee80211_teardown_sdata(struct net_device *dev)
+static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        int flushed;
        int i;
@@ -900,6 +919,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
        WARN_ON(flushed);
 }
 
+static void ieee80211_uninit(struct net_device *dev)
+{
+       ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                         struct sk_buff *skb)
 {
@@ -909,7 +933,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_dataif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
-       .ndo_uninit             = ieee80211_teardown_sdata,
+       .ndo_uninit             = ieee80211_uninit,
        .ndo_start_xmit         = ieee80211_subif_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
@@ -940,7 +964,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_monitorif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
-       .ndo_uninit             = ieee80211_teardown_sdata,
+       .ndo_uninit             = ieee80211_uninit,
        .ndo_start_xmit         = ieee80211_monitor_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
@@ -1099,7 +1123,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        /* and set some type-dependent values */
        sdata->vif.type = type;
        sdata->vif.p2p = false;
-       sdata->dev->netdev_ops = &ieee80211_dataif_ops;
        sdata->wdev.iftype = type;
 
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1130,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 
        sdata->noack_map = 0;
 
-       /* only monitor differs */
-       sdata->dev->type = ARPHRD_ETHER;
+       /* only monitor/p2p-device differ */
+       if (sdata->dev) {
+               sdata->dev->netdev_ops = &ieee80211_dataif_ops;
+               sdata->dev->type = ARPHRD_ETHER;
+       }
 
        skb_queue_head_init(&sdata->skb_queue);
        INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1172,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
                break;
        case NL80211_IFTYPE_WDS:
        case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
@@ -1156,18 +1183,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        ieee80211_debugfs_add_netdev(sdata);
 }
 
-static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
-{
-       switch (sdata->vif.type) {
-       case NL80211_IFTYPE_MESH_POINT:
-               mesh_path_flush_by_iface(sdata);
-               break;
-
-       default:
-               break;
-       }
-}
-
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
                                           enum nl80211_iftype type)
 {
@@ -1225,7 +1240,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_do_stop(sdata, false);
 
-       ieee80211_teardown_sdata(sdata->dev);
+       ieee80211_teardown_sdata(sdata);
 
        ret = drv_change_interface(local, sdata, internal_type, p2p);
        if (ret)
@@ -1240,7 +1255,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_setup_sdata(sdata, type);
 
-       err = ieee80211_do_open(sdata->dev, false);
+       err = ieee80211_do_open(&sdata->wdev, false);
        WARN(err, "type change: do_open returned %d", err);
 
        return ret;
@@ -1267,14 +1282,14 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
                        return ret;
        } else {
                /* Purge and reset type-dependent state. */
-               ieee80211_teardown_sdata(sdata->dev);
+               ieee80211_teardown_sdata(sdata);
                ieee80211_setup_sdata(sdata, type);
        }
 
        /* reset some values that shouldn't be kept across type changes */
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(sdata->local,
-                       sdata->local->hw.conf.channel->band);
+                       sdata->local->oper_channel->band);
        sdata->drop_unencrypted = 0;
        if (type == NL80211_IFTYPE_STATION)
                sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1298,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 }
 
 static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
-                                      struct net_device *dev,
-                                      enum nl80211_iftype type)
+                                      u8 *perm_addr, enum nl80211_iftype type)
 {
        struct ieee80211_sub_if_data *sdata;
        u64 mask, start, addr, val, inc;
@@ -1293,13 +1307,12 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
        int i;
 
        /* default ... something at least */
-       memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+       memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
 
        if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
            local->hw.wiphy->n_addresses <= 1)
                return;
 
-
        mutex_lock(&local->iflist_mtx);
 
        switch (type) {
@@ -1312,11 +1325,24 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                list_for_each_entry(sdata, &local->interfaces, list) {
                        if (sdata->vif.type != NL80211_IFTYPE_AP)
                                continue;
-                       memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+                       memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
                        break;
                }
                /* keep default if no AP interface present */
                break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+                       list_for_each_entry(sdata, &local->interfaces, list) {
+                               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+                                       continue;
+                               if (!ieee80211_sdata_running(sdata))
+                                       continue;
+                               memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
+                               goto out_unlock;
+                       }
+               }
+               /* otherwise fall through */
        default:
                /* assign a new address if possible -- try n_addresses first */
                for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1357,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                        }
 
                        if (!used) {
-                               memcpy(dev->perm_addr,
+                               memcpy(perm_addr,
                                       local->hw.wiphy->addresses[i].addr,
                                       ETH_ALEN);
                                break;
@@ -1382,7 +1408,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                        }
 
                        if (!used) {
-                               memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+                               memcpy(perm_addr, tmp_addr, ETH_ALEN);
                                break;
                        }
                        addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1417,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                break;
        }
 
+ out_unlock:
        mutex_unlock(&local->iflist_mtx);
 }
 
@@ -1398,49 +1425,68 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
 {
-       struct net_device *ndev;
+       struct net_device *ndev = NULL;
        struct ieee80211_sub_if_data *sdata = NULL;
        int ret, i;
        int txqs = 1;
 
        ASSERT_RTNL();
 
-       if (local->hw.queues >= IEEE80211_NUM_ACS)
-               txqs = IEEE80211_NUM_ACS;
-
-       ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-                               name, ieee80211_if_setup, txqs, 1);
-       if (!ndev)
-               return -ENOMEM;
-       dev_net_set(ndev, wiphy_net(local->hw.wiphy));
-
-       ndev->needed_headroom = local->tx_headroom +
-                               4*6 /* four MAC addresses */
-                               + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
-                               + 6 /* mesh */
-                               + 8 /* rfc1042/bridge tunnel */
-                               - ETH_HLEN /* ethernet hard_header_len */
-                               + IEEE80211_ENCRYPT_HEADROOM;
-       ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
-
-       ret = dev_alloc_name(ndev, ndev->name);
-       if (ret < 0)
-               goto fail;
-
-       ieee80211_assign_perm_addr(local, ndev, type);
-       memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
-       SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
-
-       /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
-       sdata = netdev_priv(ndev);
-       ndev->ieee80211_ptr = &sdata->wdev;
-       memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
-       memcpy(sdata->name, ndev->name, IFNAMSIZ);
+       if (type == NL80211_IFTYPE_P2P_DEVICE) {
+               struct wireless_dev *wdev;
+
+               sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
+                               GFP_KERNEL);
+               if (!sdata)
+                       return -ENOMEM;
+               wdev = &sdata->wdev;
+
+               sdata->dev = NULL;
+               strlcpy(sdata->name, name, IFNAMSIZ);
+               ieee80211_assign_perm_addr(local, wdev->address, type);
+               memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+       } else {
+               if (local->hw.queues >= IEEE80211_NUM_ACS)
+                       txqs = IEEE80211_NUM_ACS;
+
+               ndev = alloc_netdev_mqs(sizeof(*sdata) +
+                                       local->hw.vif_data_size,
+                                       name, ieee80211_if_setup, txqs, 1);
+               if (!ndev)
+                       return -ENOMEM;
+               dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+
+               ndev->needed_headroom = local->tx_headroom +
+                                       4*6 /* four MAC addresses */
+                                       + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+                                       + 6 /* mesh */
+                                       + 8 /* rfc1042/bridge tunnel */
+                                       - ETH_HLEN /* ethernet hard_header_len */
+                                       + IEEE80211_ENCRYPT_HEADROOM;
+               ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+               ret = dev_alloc_name(ndev, ndev->name);
+               if (ret < 0) {
+                       free_netdev(ndev);
+                       return ret;
+               }
+
+               ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
+               memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+               SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+
+               /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
+               sdata = netdev_priv(ndev);
+               ndev->ieee80211_ptr = &sdata->wdev;
+               memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+               memcpy(sdata->name, ndev->name, IFNAMSIZ);
+
+               sdata->dev = ndev;
+       }
 
        /* initialise type-independent data */
        sdata->wdev.wiphy = local->hw.wiphy;
        sdata->local = local;
-       sdata->dev = ndev;
 #ifdef CONFIG_INET
        sdata->arp_filter_state = true;
 #endif
@@ -1469,17 +1515,21 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        /* setup type-dependent data */
        ieee80211_setup_sdata(sdata, type);
 
-       if (params) {
-               ndev->ieee80211_ptr->use_4addr = params->use_4addr;
-               if (type == NL80211_IFTYPE_STATION)
-                       sdata->u.mgd.use_4addr = params->use_4addr;
-       }
+       if (ndev) {
+               if (params) {
+                       ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+                       if (type == NL80211_IFTYPE_STATION)
+                               sdata->u.mgd.use_4addr = params->use_4addr;
+               }
 
-       ndev->features |= local->hw.netdev_features;
+               ndev->features |= local->hw.netdev_features;
 
-       ret = register_netdevice(ndev);
-       if (ret)
-               goto fail;
+               ret = register_netdevice(ndev);
+               if (ret) {
+                       free_netdev(ndev);
+                       return ret;
+               }
+       }
 
        mutex_lock(&local->iflist_mtx);
        list_add_tail_rcu(&sdata->list, &local->interfaces);
@@ -1489,10 +1539,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                *new_wdev = &sdata->wdev;
 
        return 0;
-
- fail:
-       free_netdev(ndev);
-       return ret;
 }
 
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1549,22 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
        list_del_rcu(&sdata->list);
        mutex_unlock(&sdata->local->iflist_mtx);
 
-       /* clean up type-dependent data */
-       ieee80211_clean_sdata(sdata);
-
        synchronize_rcu();
-       unregister_netdevice(sdata->dev);
+
+       if (sdata->dev) {
+               unregister_netdevice(sdata->dev);
+       } else {
+               cfg80211_unregister_wdev(&sdata->wdev);
+               kfree(sdata);
+       }
+}
+
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
+{
+       if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
+               return;
+       ieee80211_do_stop(sdata, true);
+       ieee80211_teardown_sdata(sdata);
 }
 
 /*
@@ -1518,6 +1575,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata, *tmp;
        LIST_HEAD(unreg_list);
+       LIST_HEAD(wdev_list);
 
        ASSERT_RTNL();
 
@@ -1525,13 +1583,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
                list_del(&sdata->list);
 
-               ieee80211_clean_sdata(sdata);
-
-               unregister_netdevice_queue(sdata->dev, &unreg_list);
+               if (sdata->dev)
+                       unregister_netdevice_queue(sdata->dev, &unreg_list);
+               else
+                       list_add(&sdata->list, &wdev_list);
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
        list_del(&unreg_list);
+
+       list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+               list_del(&sdata->list);
+               cfg80211_unregister_wdev(&sdata->wdev);
+               kfree(sdata);
+       }
 }
 
 static int netdev_notify(struct notifier_block *nb,
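
The iface.c changes above make the net_device optional: a P2P_DEVICE interface carries only a wireless_dev, so every sdata->dev dereference grows a NULL guard before TX queues, multicast sync and address handling are touched. A minimal standalone sketch of that guard pattern, using hypothetical fake_* types rather than the real mac80211 structures:

/* Sketch of the "optional netdev" guard: dev may legitimately be NULL. */
#include <stdio.h>
#include <string.h>

struct fake_netdev {                    /* stand-in for struct net_device */
        char name[16];
        int tx_running;
};

struct fake_sdata {                     /* stand-in for the sub-interface */
        struct fake_netdev *dev;        /* NULL for netdev-less interfaces */
        char name[16];
};

static void iface_start_tx(struct fake_sdata *sdata)
{
        if (sdata->dev)                 /* guard: P2P device has no netdev */
                sdata->dev->tx_running = 1;
}

static void iface_stop_tx(struct fake_sdata *sdata)
{
        if (sdata->dev)
                sdata->dev->tx_running = 0;
}

int main(void)
{
        struct fake_netdev ndev = { .name = "wlan0" };
        struct fake_sdata sta_if = { .dev = &ndev };
        struct fake_sdata p2p_dev = { .dev = NULL };    /* netdev-less */

        strcpy(sta_if.name, ndev.name);
        strcpy(p2p_dev.name, "p2p-dev-wlan0");

        iface_start_tx(&sta_if);
        iface_start_tx(&p2p_dev);       /* safe no-op */
        printf("%s tx=%d, %s has %s netdev\n", sta_if.name,
               ndev.tx_running, p2p_dev.name, p2p_dev.dev ? "a" : "no");

        iface_stop_tx(&sta_if);
        iface_stop_tx(&p2p_dev);
        return 0;
}
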
index c26e231..bd75293 100644 (file)
@@ -207,6 +207,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
                sdata->vif.bss_conf.bssid = NULL;
        else if (ieee80211_vif_is_mesh(&sdata->vif)) {
                sdata->vif.bss_conf.bssid = zero;
+       } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+               sdata->vif.bss_conf.bssid = sdata->vif.addr;
+               WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
+                         "P2P Device BSS changed %#x", changed);
        } else {
                WARN_ON(1);
                return;
@@ -514,6 +518,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
                        BIT(IEEE80211_STYPE_AUTH >> 4) |
                        BIT(IEEE80211_STYPE_DEAUTH >> 4),
        },
+       [NL80211_IFTYPE_P2P_DEVICE] = {
+               .tx = 0xffff,
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                       BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
 };
 
 static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +545,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        int priv_size, i;
        struct wiphy *wiphy;
 
+       if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
+                   !ops->add_interface || !ops->remove_interface ||
+                   !ops->configure_filter))
+               return NULL;
+
        if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
                return NULL;
 
@@ -588,13 +602,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-       BUG_ON(!ops->tx);
-       BUG_ON(!ops->start);
-       BUG_ON(!ops->stop);
-       BUG_ON(!ops->config);
-       BUG_ON(!ops->add_interface);
-       BUG_ON(!ops->remove_interface);
-       BUG_ON(!ops->configure_filter);
        local->ops = ops;
 
        /* set up some defaults */
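
The main.c hunk above replaces the per-op BUG_ON() calls with a single up-front WARN_ON() that makes ieee80211_alloc_hw() return NULL when a mandatory callback is missing. A rough standalone sketch of that validate-then-allocate style, with hypothetical demo_* names:

/* Reject incomplete ops by failing allocation instead of crashing later. */
#include <stdio.h>
#include <stdlib.h>

struct demo_ops {
        int  (*start)(void *priv);
        void (*stop)(void *priv);
        int  (*tx)(void *priv, const void *frame, size_t len);
};

struct demo_hw {
        const struct demo_ops *ops;
        char priv[];                    /* driver private area */
};

static struct demo_hw *demo_alloc_hw(size_t priv_len,
                                     const struct demo_ops *ops)
{
        struct demo_hw *hw;

        /* all three callbacks are mandatory; refuse to allocate otherwise */
        if (!ops || !ops->start || !ops->stop || !ops->tx) {
                fprintf(stderr, "demo_alloc_hw: incomplete ops\n");
                return NULL;
        }

        hw = malloc(sizeof(*hw) + priv_len);
        if (!hw)
                return NULL;
        hw->ops = ops;
        return hw;
}

static int dummy_start(void *priv) { (void)priv; return 0; }
static void dummy_stop(void *priv) { (void)priv; }

int main(void)
{
        struct demo_ops incomplete = { .start = dummy_start, .stop = dummy_stop };
        struct demo_hw *hw = demo_alloc_hw(64, &incomplete);

        printf("alloc with missing ->tx: %s\n", hw ? "succeeded" : "rejected");
        free(hw);
        return 0;
}
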
index 8557235..ff0296c 100644 (file)
@@ -109,11 +109,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 
        /* Disallow HT40+/- mismatch */
        if (ie->ht_operation &&
-           (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
-           local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+           (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
+            sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
            (sta_channel_type == NL80211_CHAN_HT40MINUS ||
             sta_channel_type == NL80211_CHAN_HT40PLUS) &&
-           local->_oper_channel_type != sta_channel_type)
+           sdata->vif.bss_conf.channel_type != sta_channel_type)
                goto mismatch;
 
        return true;
@@ -136,10 +136,13 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
  * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
  *
  * @sdata: mesh interface in which mesh beacons are going to be updated
+ *
+ * Returns: beacon changed flag if the beacon content changed.
  */
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 {
        bool free_plinks;
+       u32 changed = 0;
 
        /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
         * the mesh interface might be able to establish plinks with peers that
@@ -149,8 +152,12 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
         */
        free_plinks = mesh_plink_availables(sdata);
 
-       if (free_plinks != sdata->u.mesh.accepting_plinks)
-               ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
+       if (free_plinks != sdata->u.mesh.accepting_plinks) {
+               sdata->u.mesh.accepting_plinks = free_plinks;
+               changed = BSS_CHANGED_BEACON;
+       }
+
+       return changed;
 }
 
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -262,7 +269,6 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
        neighbors = (neighbors > 15) ? 15 : neighbors;
        *pos++ = neighbors << 1;
        /* Mesh capability */
-       ifmsh->accepting_plinks = mesh_plink_availables(sdata);
        *pos = MESHCONF_CAPAB_FORWARDING;
        *pos |= ifmsh->accepting_plinks ?
            MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
@@ -349,17 +355,18 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *chan = local->oper_channel;
        u8 *pos;
 
        if (skb_tailroom(skb) < 3)
                return -ENOMEM;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[chan->band];
        if (sband->band == IEEE80211_BAND_2GHZ) {
                pos = skb_put(skb, 2 + 1);
                *pos++ = WLAN_EID_DS_PARAMS;
                *pos++ = 1;
-               *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+               *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
        }
 
        return 0;
@@ -374,7 +381,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
 
        sband = local->hw.wiphy->bands[local->oper_channel->band];
        if (!sband->ht_cap.ht_supported ||
-           local->_oper_channel_type == NL80211_CHAN_NO_HT)
+           sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
                return 0;
 
        if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -391,7 +398,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_channel *channel = local->oper_channel;
-       enum nl80211_channel_type channel_type = local->_oper_channel_type;
+       enum nl80211_channel_type channel_type =
+                               sdata->vif.bss_conf.channel_type;
        struct ieee80211_supported_band *sband =
                                local->hw.wiphy->bands[channel->band];
        struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -521,14 +529,13 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
 static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
                           struct ieee80211_if_mesh *ifmsh)
 {
-       bool free_plinks;
+       u32 changed;
 
        ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
        mesh_path_expire(sdata);
 
-       free_plinks = mesh_plink_availables(sdata);
-       if (free_plinks != sdata->u.mesh.accepting_plinks)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       changed = mesh_accept_plinks_update(sdata);
+       ieee80211_bss_info_change_notify(sdata, changed);
 
        mod_timer(&ifmsh->housekeeping_timer,
                  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -603,12 +610,14 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(sdata->local,
-                                         sdata->local->hw.conf.channel->band);
+                                         sdata->local->oper_channel->band);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
                                                BSS_CHANGED_BEACON_ENABLED |
                                                BSS_CHANGED_HT |
                                                BSS_CHANGED_BASIC_RATES |
                                                BSS_CHANGED_BEACON_INT);
+
+       netif_carrier_on(sdata->dev);
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -616,9 +625,15 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
+       netif_carrier_off(sdata->dev);
+
+       /* stop the beacon */
        ifmsh->mesh_id_len = 0;
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
-       sta_info_flush(local, NULL);
+
+       /* flush STAs and mpaths on this iface */
+       sta_info_flush(sdata->local, sdata);
+       mesh_path_flush_by_iface(sdata);
 
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
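
The mesh.c changes switch mesh_accept_plinks_update() to returning a BSS_CHANGED_* bitmask, so callers can OR several sources of change together and issue a single ieee80211_bss_info_change_notify() call. A small self-contained sketch of that accumulate-then-notify pattern, under hypothetical DEMO_* flag names:

/* Helpers report what changed; the caller notifies once with the union. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_CHANGED_BEACON     0x1u
#define DEMO_CHANGED_HT         0x2u

struct demo_mesh {
        bool accepting_plinks;
        int  free_plinks;
};

/* returns DEMO_CHANGED_BEACON only if the advertised capability flipped */
static uint32_t demo_accept_plinks_update(struct demo_mesh *m)
{
        bool can_accept = m->free_plinks > 0;

        if (can_accept == m->accepting_plinks)
                return 0;
        m->accepting_plinks = can_accept;
        return DEMO_CHANGED_BEACON;
}

static void demo_notify(uint32_t changed)
{
        if (!changed)
                return;                 /* nothing to push to the driver */
        printf("notify: changed=0x%x\n", changed);
}

int main(void)
{
        struct demo_mesh mesh = { .accepting_plinks = true, .free_plinks = 0 };
        uint32_t changed = 0;

        changed |= demo_accept_plinks_update(&mesh);    /* flips -> BEACON */
        changed |= DEMO_CHANGED_HT;                     /* some other event */
        demo_notify(changed);

        changed = demo_accept_plinks_update(&mesh);     /* no flip -> 0 */
        demo_notify(changed);
        return 0;
}
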
index faaa39b..25d0f17 100644 (file)
@@ -215,6 +215,9 @@ struct mesh_rmc {
 /* Maximum number of paths per interface */
 #define MESH_MAX_MPATHS                1024
 
+/* Number of frames buffered per destination for unresolved destinations */
+#define MESH_FRAME_QUEUE_LEN   10
+
 /* Public interfaces */
 /* Various */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -282,7 +285,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
                           u8 *hw_addr,
                           struct ieee802_11_elems *ie);
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
 void mesh_plink_deactivate(struct sta_info *sta);
 int mesh_plink_open(struct sta_info *sta);
index 494bc39..47aeee2 100644 (file)
@@ -17,8 +17,6 @@
 #define MAX_METRIC     0xffffffff
 #define ARITH_SHIFT    8
 
-/* Number of frames buffered per destination for unresolved destinations */
-#define MESH_FRAME_QUEUE_LEN   10
 #define MAX_PREQ_QUEUE_LEN     64
 
 /* Destination only */
index 075bc53..aa74981 100644 (file)
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 {
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
-       struct sk_buff_head tmpq;
        unsigned long flags;
 
        rcu_assign_pointer(mpath->next_hop, sta);
 
-       __skb_queue_head_init(&tmpq);
-
        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
-       while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+       skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
-               __skb_queue_tail(&tmpq, skb);
        }
 
-       skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
 {
-       struct sk_buff *skb, *cp_skb = NULL;
-       struct sk_buff_head gateq, failq;
+       struct sk_buff *skb, *fskb, *tmp;
+       struct sk_buff_head failq;
        unsigned long flags;
-       int num_skbs;
 
        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);
 
-       __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);
 
        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 
-       num_skbs = skb_queue_len(&failq);
-
-       while (num_skbs--) {
-               skb = __skb_dequeue(&failq);
-               if (copy) {
-                       cp_skb = skb_copy(skb, GFP_ATOMIC);
-                       if (cp_skb)
-                               __skb_queue_tail(&failq, cp_skb);
+       skb_queue_walk_safe(&failq, fskb, tmp) {
+               if (skb_queue_len(&gate_mpath->frame_queue) >=
+                                 MESH_FRAME_QUEUE_LEN) {
+                       mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+                       break;
                }
 
+               skb = skb_copy(fskb, GFP_ATOMIC);
+               if (WARN_ON(!skb))
+                       break;
+
                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
-               __skb_queue_tail(&gateq, skb);
+               skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+               if (copy)
+                       continue;
+
+               __skb_unlink(fskb, &failq);
+               kfree_skb(fskb);
        }
 
-       spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
-       skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
-       spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
 
        if (!copy)
                return;
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
-       memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
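
mesh_path_move_to_queue() above now walks the failed queue in place, stops once the gate path's queue reaches MESH_FRAME_QUEUE_LEN, and only unlinks the originals when not copying. A simplified sketch of that bounded transfer, using plain arrays instead of sk_buff queues (all demo_* names are made up):

/* Move or copy frames into a capped destination queue. */
#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#define DEMO_QUEUE_LEN 4                /* cf. MESH_FRAME_QUEUE_LEN */

struct demo_queue {
        int frames[16];
        int len;
};

static bool demo_enqueue(struct demo_queue *q, int frame)
{
        if (q->len >= DEMO_QUEUE_LEN)
                return false;           /* destination full, stop */
        q->frames[q->len++] = frame;
        return true;
}

static void demo_move_to_queue(struct demo_queue *gate,
                               struct demo_queue *failed, bool copy)
{
        int kept = 0;

        for (int i = 0; i < failed->len; i++) {
                if (!demo_enqueue(gate, failed->frames[i])) {
                        /* gate full: remainder stays on the failed queue */
                        if (!copy)
                                memmove(&failed->frames[kept],
                                        &failed->frames[i],
                                        (failed->len - i) * sizeof(int));
                        kept += failed->len - i;
                        break;
                }
                if (copy)               /* keep the original when copying */
                        failed->frames[kept++] = failed->frames[i];
        }
        failed->len = kept;
}

int main(void)
{
        struct demo_queue gate = { .len = 0 };
        struct demo_queue failed = { .frames = {1, 2, 3, 4, 5, 6}, .len = 6 };

        demo_move_to_queue(&gate, &failed, false);
        printf("gate has %d frames, failed keeps %d\n", gate.len, failed.len);
        return 0;
}
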
index af671b9..9d7ad36 100644 (file)
@@ -48,17 +48,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                u8 *da, __le16 llid, __le16 plid, __le16 reason);
 
 static inline
-void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
-       mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata);
 }
 
 static inline
-void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
-       mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata);
 }
 
 /**
@@ -117,7 +117,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        u16 ht_opmode;
        bool non_ht_sta = false, ht20_sta = false;
 
-       if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+       if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
                return 0;
 
        rcu_read_lock();
@@ -147,7 +147,8 @@ out:
 
        if (non_ht_sta)
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
-       else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+       else if (ht20_sta &&
+                sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
        else
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -170,22 +171,21 @@ out:
  * @sta: mesh peer link to deactivate
  *
  * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
  *
  * Locking: the caller must hold sta->lock
  */
-static bool __mesh_plink_deactivate(struct sta_info *sta)
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated = false;
+       u32 changed = 0;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB) {
-               mesh_plink_dec_estab_count(sdata);
-               deactivated = true;
-       }
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               changed = mesh_plink_dec_estab_count(sdata);
        sta->plink_state = NL80211_PLINK_BLOCKED;
        mesh_path_flush_by_nexthop(sta);
 
-       return deactivated;
+       return changed;
 }
 
 /**
@@ -198,18 +198,17 @@ static bool __mesh_plink_deactivate(struct sta_info *sta)
 void mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated;
+       u32 changed;
 
        spin_lock_bh(&sta->lock);
-       deactivated = __mesh_plink_deactivate(sta);
+       changed = __mesh_plink_deactivate(sta);
        sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                            sta->sta.addr, sta->llid, sta->plid,
                            sta->reason);
        spin_unlock_bh(&sta->lock);
 
-       if (deactivated)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -217,12 +216,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                u8 *da, __le16 llid, __le16 plid, __le16 reason) {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
        struct ieee80211_mgmt *mgmt;
        bool include_plid = false;
        u16 peering_proto = 0;
        u8 *pos, ie_len = 4;
        int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
                      sizeof(mgmt->u.action.u.self_prot);
+       int err = -ENOMEM;
 
        skb = dev_alloc_skb(local->tx_headroom +
                            hdr_len +
@@ -238,6 +239,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                            sdata->u.mesh.ie_len);
        if (!skb)
                return -1;
+       info = IEEE80211_SKB_CB(skb);
        skb_reserve(skb, local->tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
@@ -258,15 +260,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                        pos = skb_put(skb, 2);
                        memcpy(pos + 2, &plid, 2);
                }
-               if (ieee80211_add_srates_ie(sdata, skb, true) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+               if (ieee80211_add_srates_ie(sdata, skb, true,
+                                           local->oper_channel->band) ||
+                   ieee80211_add_ext_srates_ie(sdata, skb, true,
+                                               local->oper_channel->band) ||
                    mesh_add_rsn_ie(skb, sdata) ||
                    mesh_add_meshid_ie(skb, sdata) ||
                    mesh_add_meshconf_ie(skb, sdata))
-                       return -1;
+                       goto free;
        } else {        /* WLAN_SP_MESH_PEERING_CLOSE */
+               info->flags |= IEEE80211_TX_CTL_NO_ACK;
                if (mesh_add_meshid_ie(skb, sdata))
-                       return -1;
+                       goto free;
        }
 
        /* Add Mesh Peering Management element */
@@ -285,11 +290,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                ie_len += 2;    /* reason code */
                break;
        default:
-               return -EINVAL;
+               err = -EINVAL;
+               goto free;
        }
 
        if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
-               return -ENOMEM;
+               goto free;
 
        pos = skb_put(skb, 2 + ie_len);
        *pos++ = WLAN_EID_PEER_MGMT;
@@ -310,14 +316,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
        if (action != WLAN_SP_MESH_PEERING_CLOSE) {
                if (mesh_add_ht_cap_ie(skb, sdata) ||
                    mesh_add_ht_oper_ie(skb, sdata))
-                       return -1;
+                       goto free;
        }
 
        if (mesh_add_vendor_ies(skb, sdata))
-               return -1;
+               goto free;
 
        ieee80211_tx_skb(sdata, skb);
        return 0;
+free:
+       kfree_skb(skb);
+       return err;
 }
 
 /**
@@ -362,9 +371,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_bh(&sta->lock);
        sta->last_rx = jiffies;
+       if (sta->plink_state == NL80211_PLINK_ESTAB) {
+               spin_unlock_bh(&sta->lock);
+               return sta;
+       }
+
        sta->sta.supp_rates[band] = rates;
        if (elems->ht_cap_elem &&
-           sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+           sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                                  elems->ht_cap_elem,
                                                  &sta->sta.ht_cap);
@@ -541,15 +555,14 @@ int mesh_plink_open(struct sta_info *sta)
 void mesh_plink_block(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated;
+       u32 changed;
 
        spin_lock_bh(&sta->lock);
-       deactivated = __mesh_plink_deactivate(sta);
+       changed = __mesh_plink_deactivate(sta);
        sta->plink_state = NL80211_PLINK_BLOCKED;
        spin_unlock_bh(&sta->lock);
 
-       if (deactivated)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 
@@ -852,9 +865,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        del_timer(&sta->plink_timer);
                        sta->plink_state = NL80211_PLINK_ESTAB;
                        spin_unlock_bh(&sta->lock);
-                       mesh_plink_inc_estab_count(sdata);
+                       changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        break;
@@ -888,9 +900,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        del_timer(&sta->plink_timer);
                        sta->plink_state = NL80211_PLINK_ESTAB;
                        spin_unlock_bh(&sta->lock);
-                       mesh_plink_inc_estab_count(sdata);
+                       changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        mesh_plink_frame_tx(sdata,
@@ -908,13 +919,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                case CLS_ACPT:
                        reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
                        sta->reason = reason;
-                       __mesh_plink_deactivate(sta);
+                       changed |= __mesh_plink_deactivate(sta);
                        sta->plink_state = NL80211_PLINK_HOLDING;
                        llid = sta->llid;
                        mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
                        spin_unlock_bh(&sta->lock);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                                            sta->sta.addr, llid, plid, reason);
                        break;
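
mesh_plink_frame_tx() above gains one 'free:' label so that every failure after the skb has been allocated releases it instead of leaking. A compact standalone illustration of that single-exit cleanup idiom, with hypothetical demo_* helpers:

/* All error paths after allocation jump to one label that frees the buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int demo_add_element(char *buf, size_t cap, size_t *off,
                            const char *elem)
{
        size_t len = strlen(elem);

        if (*off + len + 1 > cap)
                return -ENOMEM;         /* not enough tailroom */
        memcpy(buf + *off, elem, len + 1);
        *off += len + 1;
        return 0;
}

static int demo_build_frame(int action)
{
        size_t off = 0, cap = 64;
        char *buf = malloc(cap);
        int err = -ENOMEM;

        if (!buf)
                return -ENOMEM;

        if (demo_add_element(buf, cap, &off, "meshid") ||
            demo_add_element(buf, cap, &off, "meshconf"))
                goto free;              /* an element did not fit */

        if (action < 0) {
                err = -EINVAL;          /* unknown action code */
                goto free;
        }

        printf("frame built, %zu bytes of elements\n", off);
        free(buf);
        return 0;

free:
        free(buf);
        return err;
}

int main(void)
{
        demo_build_frame(1);            /* success path */
        demo_build_frame(-1);           /* error path, still no leak */
        return 0;
}
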
index a4a5acd..a8cf70b 100644 (file)
@@ -146,6 +146,9 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
        if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
                return;
 
+       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+               return;
+
        mod_timer(&sdata->u.mgd.bcn_mon_timer,
                  round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
 }
@@ -182,15 +185,15 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
        u16 ht_opmode;
        bool disable_40 = false;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
 
        switch (sdata->vif.bss_conf.channel_type) {
        case NL80211_CHAN_HT40PLUS:
-               if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+               if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
                        disable_40 = true;
                break;
        case NL80211_CHAN_HT40MINUS:
-               if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+               if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
                        disable_40 = true;
                break;
        default:
@@ -326,6 +329,26 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+                                struct sk_buff *skb,
+                                struct ieee80211_supported_band *sband)
+{
+       u8 *pos;
+       u32 cap;
+       struct ieee80211_sta_vht_cap vht_cap;
+
+       BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
+
+       memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+
+       /* determine capability flags */
+       cap = vht_cap.cap;
+
+       /* reserve and fill IE */
+       pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
+       ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
+}
+
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -371,6 +394,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                        4 + /* power capability */
                        2 + 2 * sband->n_channels + /* supported channels */
                        2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+                       2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
                        assoc_data->ie_len + /* extra IEs */
                        9, /* WMM */
                        GFP_KERNEL);
@@ -503,6 +527,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
                                    sband, local->oper_channel, ifmgd->ap_smps);
 
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+               ieee80211_add_vht_ie(sdata, skb, sband);
+
        /* if present, add any custom non-vendor IEs that go after HT */
        if (assoc_data->ie_len && assoc_data->ie) {
                noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -583,8 +610,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
                        IEEE80211_SKB_CB(skb)->flags |=
                                IEEE80211_TX_INTFL_DONT_ENCRYPT;
 
-               drv_mgd_prepare_tx(local, sdata);
-
                ieee80211_tx_skb(sdata, skb);
        }
 }
@@ -687,6 +712,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        /* XXX: shouldn't really modify cfg80211-owned data! */
        ifmgd->associated->channel = sdata->local->oper_channel;
 
+       /* XXX: wait for a beacon first? */
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
  out:
@@ -763,36 +789,32 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
        sdata->local->csa_channel = new_ch;
 
+       ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+
+       if (sw_elem->mode)
+               ieee80211_stop_queues_by_reason(&sdata->local->hw,
+                               IEEE80211_QUEUE_STOP_REASON_CSA);
+
        if (sdata->local->ops->channel_switch) {
                /* use driver's channel switch callback */
-               struct ieee80211_channel_switch ch_switch;
-               memset(&ch_switch, 0, sizeof(ch_switch));
-               ch_switch.timestamp = timestamp;
-               if (sw_elem->mode) {
-                       ch_switch.block_tx = true;
-                       ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-               }
-               ch_switch.channel = new_ch;
-               ch_switch.count = sw_elem->count;
-               ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+               struct ieee80211_channel_switch ch_switch = {
+                       .timestamp = timestamp,
+                       .block_tx = sw_elem->mode,
+                       .channel = new_ch,
+                       .count = sw_elem->count,
+               };
+
                drv_channel_switch(sdata->local, &ch_switch);
                return;
        }
 
        /* channel switch handled in software */
-       if (sw_elem->count <= 1) {
+       if (sw_elem->count <= 1)
                ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
-       } else {
-               if (sw_elem->mode)
-                       ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-               ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+       else
                mod_timer(&ifmgd->chswitch_timer,
-                         jiffies +
-                         msecs_to_jiffies(sw_elem->count *
-                                          cbss->beacon_interval));
-       }
+                         TU_TO_EXP_TIME(sw_elem->count *
+                                        cbss->beacon_interval));
 }
 
 static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
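
The channel-switch hunk above hoists the common CSA bookkeeping (setting the flag, stopping queues) out of both branches and builds the switch descriptor with a designated initializer before dispatching to either the driver hook or the software fallback. A standalone sketch of that shape; the types and callback here are invented for illustration:

/* Do the common work once, then hand the event to driver or software path. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_ch_switch {
        uint64_t timestamp;
        bool     block_tx;
        int      target_channel;
        int      count;                 /* beacons until the switch */
};

struct demo_driver {
        void (*channel_switch)(const struct demo_ch_switch *cs);
};

static void demo_sw_switch(const struct demo_ch_switch *cs)
{
        printf("software switch to channel %d in %d beacons\n",
               cs->target_channel, cs->count);
}

static void demo_process_chanswitch(const struct demo_driver *drv,
                                    int channel, int count, bool quiet)
{
        /* common part: done once, before deciding who handles the switch */
        if (quiet)
                printf("stopping tx queues for CSA\n");

        struct demo_ch_switch cs = {
                .timestamp      = 123456789ULL,
                .block_tx       = quiet,
                .target_channel = channel,
                .count          = count,
        };

        if (drv->channel_switch) {
                drv->channel_switch(&cs);       /* offload to the driver */
                return;
        }
        demo_sw_switch(&cs);                    /* handled in software */
}

int main(void)
{
        struct demo_driver no_hook = { .channel_switch = NULL };

        demo_process_chanswitch(&no_hook, 11, 3, true);
        return 0;
}
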
@@ -1007,6 +1029,16 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
        ieee80211_change_ps(local);
 }
 
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
+{
+       bool ps_allowed = ieee80211_powersave_allowed(sdata);
+
+       if (sdata->vif.bss_conf.ps != ps_allowed) {
+               sdata->vif.bss_conf.ps = ps_allowed;
+               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
+       }
+}
+
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
@@ -1239,7 +1271,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
        }
 
        use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-       if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
+       if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
                use_short_slot = true;
 
        if (use_protection != bss_conf->use_cts_prot) {
@@ -1310,6 +1342,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
        ieee80211_recalc_smps(local);
        mutex_unlock(&local->iflist_mtx);
 
+       ieee80211_recalc_ps_vif(sdata);
+
        netif_tx_start_all_queues(sdata->dev);
        netif_carrier_on(sdata->dev);
 }
@@ -1371,6 +1405,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        }
        local->ps_sdata = NULL;
 
+       /* disable per-vif ps */
+       ieee80211_recalc_ps_vif(sdata);
+
        /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
        if (tx)
                drv_flush(local, false);
@@ -1542,7 +1579,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                        ssid_len = ssid[1];
 
                ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
-                                        0, (u32) -1, true, false);
+                                        0, (u32) -1, true, false,
+                                        ifmgd->associated->channel);
        }
 
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1645,7 +1683,9 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
                ssid_len = ssid[1];
 
        skb = ieee80211_build_probe_req(sdata, cbss->bssid,
-                                       (u32) -1, ssid + 2, ssid_len,
+                                       (u32) -1,
+                                       sdata->local->oper_channel,
+                                       ssid + 2, ssid_len,
                                        NULL, 0, true);
 
        return skb;
@@ -1656,7 +1696,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
-       u8 bssid[ETH_ALEN];
        u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
        mutex_lock(&ifmgd->mtx);
@@ -1665,9 +1704,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
-       sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
+       sdata_info(sdata, "Connection to AP %pM lost\n",
+                  ifmgd->associated->bssid);
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1685,7 +1723,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&local->mtx);
 }
 
-void ieee80211_beacon_connection_loss_work(struct work_struct *work)
+static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
 {
        struct ieee80211_sub_if_data *sdata =
                container_of(work, struct ieee80211_sub_if_data,
@@ -2232,14 +2270,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                mutex_unlock(&local->iflist_mtx);
        }
 
-       if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
-           (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
-                                                       ETH_ALEN) == 0)) {
-               struct ieee80211_channel_sw_ie *sw_elem =
-                       (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
-               ieee80211_sta_process_chanswitch(sdata, sw_elem,
+       if (elems->ch_switch_ie &&
+           memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
+               ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
                                                 bss, rx_status->mactime);
-       }
 }
 
 
@@ -2326,7 +2360,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        if (baselen > len)
                return;
 
-       if (rx_status->freq != local->hw.conf.channel->center_freq)
+       if (rx_status->freq != local->oper_channel->center_freq)
                return;
 
        if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2490,7 +2524,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
            !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
                struct ieee80211_supported_band *sband;
 
-               sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+               sband = local->hw.wiphy->bands[local->oper_channel->band];
 
                changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
                                                  bssid, true);
@@ -2673,7 +2707,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
                 * will not answer to direct packet in unassociated state.
                 */
                ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
-                                        NULL, 0, (u32) -1, true, false);
+                                        NULL, 0, (u32) -1, true, false,
+                                        auth_data->bss->channel);
        }
 
        auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -3000,41 +3035,17 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
        return 0;
 }
 
-static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
-                                    struct cfg80211_bss *cbss, bool assoc)
+static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+                                 struct cfg80211_bss *cbss)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct ieee80211_bss *bss = (void *)cbss->priv;
-       struct sta_info *sta = NULL;
-       bool have_sta = false;
-       int err;
        int ht_cfreq;
        enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
        const u8 *ht_oper_ie;
        const struct ieee80211_ht_operation *ht_oper = NULL;
        struct ieee80211_supported_band *sband;
 
-       if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
-               return -EINVAL;
-
-       if (assoc) {
-               rcu_read_lock();
-               have_sta = sta_info_get(sdata, cbss->bssid);
-               rcu_read_unlock();
-       }
-
-       if (!have_sta) {
-               sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
-               if (!sta)
-                       return -ENOMEM;
-       }
-
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&local->mtx);
-
-       /* switch to the right channel */
        sband = local->hw.wiphy->bands[cbss->channel->band];
 
        ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3097,10 +3108,51 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        local->oper_channel = cbss->channel;
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-       if (sta) {
+       return 0;
+}
+
+static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+                                    struct cfg80211_bss *cbss, bool assoc)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       struct ieee80211_bss *bss = (void *)cbss->priv;
+       struct sta_info *new_sta = NULL;
+       bool have_sta = false;
+       int err;
+
+       if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
+               return -EINVAL;
+
+       if (assoc) {
+               rcu_read_lock();
+               have_sta = sta_info_get(sdata, cbss->bssid);
+               rcu_read_unlock();
+       }
+
+       if (!have_sta) {
+               new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
+               if (!new_sta)
+                       return -ENOMEM;
+       }
+
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(sdata->local);
+       mutex_unlock(&local->mtx);
+
+       if (new_sta) {
                u32 rates = 0, basic_rates = 0;
                bool have_higher_than_11mbit;
                int min_rate = INT_MAX, min_rate_index = -1;
+               struct ieee80211_supported_band *sband;
+
+               sband = local->hw.wiphy->bands[cbss->channel->band];
+
+               err = ieee80211_prep_channel(sdata, cbss);
+               if (err) {
+                       sta_info_free(local, new_sta);
+                       return err;
+               }
 
                ieee80211_get_rates(sband, bss->supp_rates,
                                    bss->supp_rates_len,
@@ -3122,7 +3174,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        basic_rates = BIT(min_rate_index);
                }
 
-               sta->sta.supp_rates[cbss->channel->band] = rates;
+               new_sta->sta.supp_rates[cbss->channel->band] = rates;
                sdata->vif.bss_conf.basic_rates = basic_rates;
 
                /* cf. IEEE 802.11 9.2.12 */
@@ -3145,10 +3197,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        BSS_CHANGED_BEACON_INT);
 
                if (assoc)
-                       sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+                       sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
 
-               err = sta_info_insert(sta);
-               sta = NULL;
+               err = sta_info_insert(new_sta);
+               new_sta = NULL;
                if (err) {
                        sdata_info(sdata,
                                   "failed to insert STA entry for the AP (error %d)\n",
@@ -3300,9 +3352,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
 
        /* prepare assoc data */
-
-       ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
-       ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+       
+       /*
+        * keep only the 40 MHz disable bit set as it might have
+        * been set during authentication already, all other bits
+        * should be reset for a new connection
+        */
+       ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
 
        ifmgd->beacon_crc_valid = false;
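The mask above intentionally drops every per-connection flag except IEEE80211_STA_DISABLE_40MHZ, whereas the old code only cleared two named bits and let anything else survive. A minimal standalone sketch of that difference, using made-up flag values rather than mac80211's:

#include <stdio.h>

#define STA_DISABLE_11N		0x1u	/* illustrative values only */
#define STA_NULLFUNC_ACKED	0x2u
#define STA_DISABLE_40MHZ	0x4u
#define STA_SOME_STALE_BIT	0x8u

int main(void)
{
	unsigned int flags = STA_NULLFUNC_ACKED | STA_DISABLE_40MHZ |
			     STA_SOME_STALE_BIT;

	/* old approach: clear two known bits, any other stale bit survives */
	unsigned int cleared = flags & ~(STA_DISABLE_11N | STA_NULLFUNC_ACKED);

	/* new approach: keep only the 40 MHz disable bit */
	unsigned int kept = flags & STA_DISABLE_40MHZ;

	printf("cleared=%#x kept=%#x\n", cleared, kept);	/* 0xc vs 0x4 */
	return 0;
}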
 
@@ -3318,21 +3374,34 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
                        ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                        netdev_info(sdata->dev,
-                                   "disabling HT due to WEP/TKIP use\n");
+                                   "disabling HT/VHT due to WEP/TKIP use\n");
                }
        }
 
-       if (req->flags & ASSOC_REQ_DISABLE_HT)
+       if (req->flags & ASSOC_REQ_DISABLE_HT) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+       }
 
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
            local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
-               netdev_info(sdata->dev,
-                           "disabling HT as WMM/QoS is not supported\n");
+               if (!bss->wmm_used)
+                       netdev_info(sdata->dev,
+                                   "disabling HT as WMM/QoS is not supported by the AP\n");
+       }
+
+       /* disable VHT if we don't support it or the AP doesn't use WMM */
+       if (!sband->vht_cap.vht_supported ||
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+               if (!bss->wmm_used)
+                       netdev_info(sdata->dev,
+                                   "disabling VHT as WMM/QoS is not supported by the AP\n");
        }
 
        memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3467,14 +3536,17 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                   req->bssid, req->reason_code);
 
        if (ifmgd->associated &&
-           ether_addr_equal(ifmgd->associated->bssid, req->bssid))
+           ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       req->reason_code, true, frame_buf);
-       else
+       } else {
+               drv_mgd_prepare_tx(sdata->local, sdata);
                ieee80211_send_deauth_disassoc(sdata, req->bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               req->reason_code, true,
                                               frame_buf);
+       }
+
        mutex_unlock(&ifmgd->mtx);
 
        __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
index 635c325..507121d 100644 (file)
@@ -116,6 +116,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
@@ -144,6 +147,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
 
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
index 6e4fd32..10de668 100644 (file)
@@ -56,7 +56,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
        if (!ref)
                return;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
 
        ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
        set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
index 0cb4ede..b382605 100644 (file)
@@ -60,7 +60,9 @@ static inline int should_drop_frame(struct sk_buff *skb,
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-       if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+       if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
+                           RX_FLAG_FAILED_PLCP_CRC |
+                           RX_FLAG_AMPDU_IS_ZEROLEN))
                return 1;
        if (unlikely(skb->len < 16 + present_fcs_len))
                return 1;
@@ -91,6 +93,13 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        if (status->flag & RX_FLAG_HT) /* HT info */
                len += 3;
 
+       if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+               /* padding */
+               while (len & 3)
+                       len++;
+               len += 8;
+       }
+
        return len;
 }
 
@@ -215,6 +224,37 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                pos++;
                *pos++ = status->rate_idx;
        }
+
+       if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+               u16 flags = 0;
+
+               /* ensure 4 byte alignment */
+               while ((pos - (u8 *)rthdr) & 3)
+                       pos++;
+               rthdr->it_present |=
+                       cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
+               put_unaligned_le32(status->ampdu_reference, pos);
+               pos += 4;
+               if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
+               if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
+               if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+               if (status->flag & RX_FLAG_AMPDU_IS_LAST)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
+               put_unaligned_le16(flags, pos);
+               pos += 2;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+                       *pos++ = status->ampdu_delimiter_crc;
+               else
+                       *pos++ = 0;
+               *pos++ = 0;
+       }
 }
 
 /*
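The new radiotap block emits the A-MPDU status field as 8 bytes, a 32-bit reference number, 16-bit flags, the delimiter CRC and a reserved byte, little-endian and padded to 4-byte alignment, which is exactly what ieee80211_rx_radiotap_len() accounts for above. A self-contained sketch of that layout, with placeholder field values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t *put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
	return p + 4;
}

static uint8_t *put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	return p + 2;
}

int main(void)
{
	uint8_t buf[32], *start = buf, *pos, *field;

	memset(buf, 0, sizeof(buf));
	pos = buf + 3;			/* pretend earlier fields left us misaligned */

	while ((pos - start) & 3)	/* same alignment rule as the hunk above */
		pos++;
	field = pos;

	pos = put_le32(pos, 0x12345678);	/* A-MPDU reference number */
	pos = put_le16(pos, 0x0001);		/* flags, placeholder value */
	*pos++ = 0;				/* delimiter CRC (unknown here) */
	*pos++ = 0;				/* reserved */

	printf("field at offset %td, %td bytes long\n",
	       field - start, pos - field);	/* offset 4, 8 bytes */
	return 0;
}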
@@ -2268,7 +2308,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
                goto queue;
        case WLAN_CATEGORY_SPECTRUM_MGMT:
-               if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+               if (status->band != IEEE80211_BAND_5GHZ)
                        break;
 
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                if (!bssid) {
                        if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
                                return 0;
-               } else if (!ieee80211_bssid_match(bssid,
-                                       sdata->vif.addr)) {
+               } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
                        /*
                         * Accept public action frames even when the
                         * BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
                        return 0;
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!ieee80211_is_public_action(hdr, skb->len) &&
+                   !ieee80211_is_probe_req(hdr->frame_control) &&
+                   !ieee80211_is_probe_resp(hdr->frame_control) &&
+                   !ieee80211_is_beacon(hdr->frame_control))
+                       return 0;
+               if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
+                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+               break;
        default:
                /* should never get here */
-               WARN_ON(1);
+               WARN_ON_ONCE(1);
                break;
        }
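For reference, the frame classes the new NL80211_IFTYPE_P2P_DEVICE case lets through can be re-derived from the 802.11 frame-control layout (type in bits 2-3, subtype in bits 4-7). The sketch below is an illustration only: it ignores the little-endian frame_control handling and the length check that ieee80211_is_public_action() also performs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE	0x000c		/* 802.11 frame control: type bits */
#define FCTL_STYPE	0x00f0		/* subtype bits */
#define FTYPE_MGMT	0x0000
#define STYPE_PROBE_REQ	0x0040
#define STYPE_PROBE_RESP 0x0050
#define STYPE_BEACON	0x0080
#define STYPE_ACTION	0x00d0
#define CATEGORY_PUBLIC	4		/* public action category code */

static bool is_mgmt_subtype(uint16_t fc, uint16_t stype)
{
	return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | stype);
}

static bool p2p_device_accepts(uint16_t fc, uint8_t action_category)
{
	if (is_mgmt_subtype(fc, STYPE_ACTION) &&
	    action_category == CATEGORY_PUBLIC)
		return true;				/* public action frame */
	return is_mgmt_subtype(fc, STYPE_PROBE_REQ) ||
	       is_mgmt_subtype(fc, STYPE_PROBE_RESP) ||
	       is_mgmt_subtype(fc, STYPE_BEACON);
}

int main(void)
{
	printf("beacon accepted:     %d\n", p2p_device_accepts(STYPE_BEACON, 0));
	printf("data frame accepted: %d\n", p2p_device_accepts(0x0008, 0));
	return 0;
}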
 
index 839dd97..740e414 100644 (file)
@@ -416,7 +416,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
                        local->scan_req->ssids[i].ssid_len,
                        local->scan_req->ie, local->scan_req->ie_len,
                        local->scan_req->rates[band], false,
-                       local->scan_req->no_cck);
+                       local->scan_req->no_cck,
+                       local->hw.conf.channel);
 
        /*
         * After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        if (local->ops->hw_scan) {
                __set_bit(SCAN_HW_SCANNING, &local->scanning);
        } else if ((req->n_channels == 1) &&
-                  (req->channels[0]->center_freq ==
-                   local->hw.conf.channel->center_freq)) {
-
-               /* If we are scanning only on the current channel, then
-                * we do not need to stop normal activities
+                  (req->channels[0] == local->oper_channel)) {
+               /*
+                * If we are scanning only on the operating channel
+                * then we do not need to stop normal activities
                 */
                unsigned long next_delay;
 
index 8cd7291..b0801b7 100644 (file)
@@ -519,19 +519,27 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                u64 cookie = (unsigned long)skb;
                acked = info->flags & IEEE80211_TX_STAT_ACK;
 
-               /*
-                * TODO: When we have non-netdev frame TX,
-                * we cannot use skb->dev->ieee80211_ptr
-                */
-
                if (ieee80211_is_nullfunc(hdr->frame_control) ||
-                   ieee80211_is_qos_nullfunc(hdr->frame_control))
+                   ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                        cfg80211_probe_status(skb->dev, hdr->addr1,
                                              cookie, acked, GFP_ATOMIC);
-               else
+               } else if (skb->dev) {
                        cfg80211_mgmt_tx_status(
                                skb->dev->ieee80211_ptr, cookie, skb->data,
                                skb->len, acked, GFP_ATOMIC);
+               } else {
+                       struct ieee80211_sub_if_data *p2p_sdata;
+
+                       rcu_read_lock();
+
+                       p2p_sdata = rcu_dereference(local->p2p_sdata);
+                       if (p2p_sdata) {
+                               cfg80211_mgmt_tx_status(
+                                       &p2p_sdata->wdev, cookie, skb->data,
+                                       skb->len, acked, GFP_ATOMIC);
+                       }
+                       rcu_read_unlock();
+               }
        }
 
        if (unlikely(info->ack_frame_id)) {
index c6d33b5..18d9c8a 100644 (file)
@@ -24,7 +24,7 @@
                        __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
@@ -274,9 +274,12 @@ TRACE_EVENT(drv_config,
                __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
                __entry->max_sleep_period = local->hw.conf.max_sleep_period;
                __entry->listen_interval = local->hw.conf.listen_interval;
-               __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count;
-               __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
-               __entry->center_freq = local->hw.conf.channel->center_freq;
+               __entry->long_frame_max_tx_count =
+                       local->hw.conf.long_frame_max_tx_count;
+               __entry->short_frame_max_tx_count =
+                       local->hw.conf.short_frame_max_tx_count;
+               __entry->center_freq = local->hw.conf.channel ?
+                                       local->hw.conf.channel->center_freq : 0;
                __entry->channel_type = local->hw.conf.channel_type;
                __entry->smps = local->hw.conf.smps_mode;
        ),
index c5e8c9c..29eb4e6 100644 (file)
@@ -55,7 +55,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
        if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
                return 0;
 
-       sband = local->hw.wiphy->bands[tx->channel->band];
+       sband = local->hw.wiphy->bands[info->band];
        txrate = &sband->bitrates[info->control.rates[0].idx];
 
        erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -615,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        memset(&txrc, 0, sizeof(txrc));
 
-       sband = tx->local->hw.wiphy->bands[tx->channel->band];
+       sband = tx->local->hw.wiphy->bands[info->band];
 
        len = min_t(u32, tx->skb->len + FCS_LEN,
                         tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        txrc.bss_conf = &tx->sdata->vif.bss_conf;
        txrc.skb = tx->skb;
        txrc.reported_rate.idx = -1;
-       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
        if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
        memcpy(txrc.rate_idx_mcs_mask,
-              tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
+              tx->sdata->rc_rateidx_mcs_mask[info->band],
               sizeof(txrc.rate_idx_mcs_mask));
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
                 "scanning and associated. Target station: "
                 "%pM on %d GHz band\n",
                 tx->sdata->name, hdr->addr1,
-                tx->channel->band ? 5 : 2))
+                info->band ? 5 : 2))
                return TX_DROP;
 
        /*
@@ -1131,7 +1131,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        tx->skb = skb;
        tx->local = local;
        tx->sdata = sdata;
-       tx->channel = local->hw.conf.channel;
        __skb_queue_head_init(&tx->skbs);
 
        /*
@@ -1204,6 +1203,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                               struct sk_buff_head *skbs,
                               bool txpending)
 {
+       struct ieee80211_tx_control control;
        struct sk_buff *skb, *tmp;
        unsigned long flags;
 
@@ -1240,10 +1240,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
                info->control.vif = vif;
-               info->control.sta = sta;
+               control.sta = sta;
 
                __skb_unlink(skb, skbs);
-               drv_tx(local, skb);
+               drv_tx(local, &control, skb);
        }
 
        return true;
@@ -1399,8 +1399,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       tx.channel = local->hw.conf.channel;
-       info->band = tx.channel->band;
+       info->band = local->hw.conf.channel->band;
 
        /* set up hw_queue value early */
        if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_info *info;
-       int ret = NETDEV_TX_BUSY, head_need;
+       int head_need;
        u16 ethertype, hdrlen,  meshhdrlen = 0;
        __le16 fc;
        struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        u32 info_flags = 0;
        u16 info_id = 0;
 
-       if (unlikely(skb->len < ETH_HLEN)) {
-               ret = NETDEV_TX_OK;
+       if (unlikely(skb->len < ETH_HLEN))
                goto fail;
-       }
 
        /* convert Ethernet header to proper 802.11 header (based on
         * operation mode) */
@@ -1787,7 +1784,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
                        /* Do not send frames with mesh_ttl == 0 */
                        sdata->u.mesh.mshstats.dropped_frames_ttl++;
-                       ret = NETDEV_TX_OK;
                        goto fail;
                }
                rcu_read_lock();
@@ -1874,10 +1870,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                if (tdls_direct) {
                        /* link during setup - throw out frames to peer */
-                       if (!tdls_auth) {
-                               ret = NETDEV_TX_OK;
+                       if (!tdls_auth)
                                goto fail;
-                       }
 
                        /* DA SA BSSID */
                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1911,7 +1905,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                hdrlen = 24;
                break;
        default:
-               ret = NETDEV_TX_OK;
                goto fail;
        }
 
@@ -1956,7 +1949,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
 
-               ret = NETDEV_TX_OK;
                goto fail;
        }
 
@@ -2011,10 +2003,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                skb = skb_clone(skb, GFP_ATOMIC);
                kfree_skb(tmp_skb);
 
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
+               if (!skb)
                        goto fail;
-               }
        }
 
        hdr.frame_control = fc;
@@ -2117,10 +2107,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
  fail:
-       if (ret == NETDEV_TX_OK)
-               dev_kfree_skb(skb);
-
-       return ret;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 
@@ -2295,12 +2283,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        struct ieee80211_sub_if_data *sdata = NULL;
        struct ieee80211_if_ap *ap = NULL;
        struct beacon_data *beacon;
-       struct ieee80211_supported_band *sband;
-       enum ieee80211_band band = local->hw.conf.channel->band;
+       enum ieee80211_band band = local->oper_channel->band;
        struct ieee80211_tx_rate_control txrc;
 
-       sband = local->hw.wiphy->bands[band];
-
        rcu_read_lock();
 
        sdata = vif_to_sdata(vif);
@@ -2410,7 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                memset(mgmt, 0, hdr_len);
                mgmt->frame_control =
                    cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-               memset(mgmt->da, 0xff, ETH_ALEN);
+               eth_broadcast_addr(mgmt->da);
                memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
                mgmt->u.beacon.beacon_int =
@@ -2422,9 +2407,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                *pos++ = WLAN_EID_SSID;
                *pos++ = 0x0;
 
-               if (ieee80211_add_srates_ie(sdata, skb, true) ||
+               if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
                    mesh_add_ds_params_ie(skb, sdata) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+                   ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
                    mesh_add_rsn_ie(skb, sdata) ||
                    mesh_add_ht_cap_ie(skb, sdata) ||
                    mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2447,12 +2432,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 
        memset(&txrc, 0, sizeof(txrc));
        txrc.hw = hw;
-       txrc.sband = sband;
+       txrc.sband = local->hw.wiphy->bands[band];
        txrc.bss_conf = &sdata->vif.bss_conf;
        txrc.skb = skb;
        txrc.reported_rate.idx = -1;
        txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
-       if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+       if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2476,7 +2461,8 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif)
 {
        struct ieee80211_if_ap *ap = NULL;
-       struct sk_buff *presp = NULL, *skb = NULL;
+       struct sk_buff *skb = NULL;
+       struct probe_resp *presp = NULL;
        struct ieee80211_hdr *hdr;
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
@@ -2490,10 +2476,12 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
        if (!presp)
                goto out;
 
-       skb = skb_copy(presp, GFP_ATOMIC);
+       skb = dev_alloc_skb(presp->len);
        if (!skb)
                goto out;
 
+       memcpy(skb_put(skb, presp->len), presp->data, presp->len);
+
        hdr = (struct ieee80211_hdr *) skb->data;
        memset(hdr->addr1, 0, sizeof(hdr->addr1));
 
@@ -2604,9 +2592,9 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
        memset(hdr, 0, sizeof(*hdr));
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                         IEEE80211_STYPE_PROBE_REQ);
-       memset(hdr->addr1, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr1);
        memcpy(hdr->addr2, vif->addr, ETH_ALEN);
-       memset(hdr->addr3, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr3);
 
        pos = skb_put(skb, ie_ssid_len);
        *pos++ = WLAN_EID_SSID;
@@ -2703,8 +2691,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
        info = IEEE80211_SKB_CB(skb);
 
        tx.flags |= IEEE80211_TX_PS_BUFFERED;
-       tx.channel = local->hw.conf.channel;
-       info->band = tx.channel->band;
+       info->band = local->oper_channel->band;
 
        if (invoke_tx_handlers(&tx))
                skb = NULL;
index 39b82fe..471fb05 100644 (file)
@@ -276,6 +276,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                int ac;
 
+               if (!sdata->dev)
+                       continue;
+
                if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
                        continue;
 
@@ -364,6 +367,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                int ac;
 
+               if (!sdata->dev)
+                       continue;
+
                for (ac = 0; ac < n_acs; ac++) {
                        if (sdata->vif.hw_queue[ac] == queue ||
                            sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                                elem_parse_failed = true;
                        break;
                case WLAN_EID_CHANNEL_SWITCH:
-                       elems->ch_switch_elem = pos;
-                       elems->ch_switch_elem_len = elen;
+                       if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+                               elem_parse_failed = true;
+                               break;
+                       }
+                       elems->ch_switch_ie = (void *)pos;
                        break;
                case WLAN_EID_QUIET:
                        if (!elems->quiet_elem) {
@@ -832,7 +841,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
 
        memset(&qparam, 0, sizeof(qparam));
 
-       use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
+       use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
                 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
 
        /*
@@ -899,7 +908,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
                drv_conf_tx(local, sdata, ac, &qparam);
        }
 
-       if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+       if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+           sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
                sdata->vif.bss_conf.qos = enable_qos;
                if (bss_notify)
                        ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +929,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
                if ((supp_rates[i] & 0x7f) * 5 > 110)
                        have_higher_than_11mbit = 1;
 
-       if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+       if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
            have_higher_than_11mbit)
                sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
        else
@@ -1100,6 +1110,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
 
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
+                                         struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed)
@@ -1109,7 +1120,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgmt *mgmt;
        size_t buf_len;
        u8 *buf;
-       u8 chan;
+       u8 chan_no;
 
        /* FIXME: come up with a proper value */
        buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1133,12 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
         * badly-behaved APs don't respond when this parameter is included.
         */
        if (directed)
-               chan = 0;
+               chan_no = 0;
        else
-               chan = ieee80211_frequency_to_channel(
-                       local->hw.conf.channel->center_freq);
+               chan_no = ieee80211_frequency_to_channel(chan->center_freq);
 
-       buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
-                                          local->hw.conf.channel->band,
-                                          ratemask, chan);
+       buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
+                                          ratemask, chan_no);
 
        skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
                                     ssid, ssid_len,
@@ -1154,11 +1163,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck)
+                             u32 ratemask, bool directed, bool no_cck,
+                             struct ieee80211_channel *channel)
 {
        struct sk_buff *skb;
 
-       skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
+       skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
+                                       ssid, ssid_len,
                                        ie, ie_len, directed);
        if (skb) {
                if (no_cck)
@@ -1359,7 +1370,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC |
-                                  BSS_CHANGED_ARP_FILTER;
+                                  BSS_CHANGED_ARP_FILTER |
+                                  BSS_CHANGED_PS;
                        mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
                        mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1397,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                case NL80211_IFTYPE_MONITOR:
                        /* ignore virtual */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       changed = BSS_CHANGED_IDLE;
+                       break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
                case NL80211_IFTYPE_P2P_CLIENT:
@@ -1571,6 +1586,8 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
                        goto set;
 
@@ -1809,7 +1826,8 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
 }
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-                           struct sk_buff *skb, bool need_basic)
+                           struct sk_buff *skb, bool need_basic,
+                           enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -1817,7 +1835,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
        u8 i, rates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[band];
        rates = sband->n_bitrates;
        if (rates > 8)
                rates = 8;
@@ -1840,7 +1858,8 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
 }
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-                               struct sk_buff *skb, bool need_basic)
+                               struct sk_buff *skb, bool need_basic,
+                               enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -1848,7 +1867,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[band];
        exrates = sband->n_bitrates;
        if (exrates > 8)
                exrates -= 8;
index 0bc6b60..8f4b0b2 100644 (file)
@@ -131,14 +131,13 @@ unsigned int nf_iterate(struct list_head *head,
                        int hook_thresh)
 {
        unsigned int verdict;
+       struct nf_hook_ops *elem = list_entry_rcu(*i, struct nf_hook_ops, list);
 
        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
-       list_for_each_continue_rcu(*i, head) {
-               struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
-
+       list_for_each_entry_continue_rcu(elem, head, list) {
                if (hook_thresh > elem->priority)
                        continue;
 
@@ -155,11 +154,14 @@ repeat:
                                continue;
                        }
 #endif
-                       if (verdict != NF_REPEAT)
+                       if (verdict != NF_REPEAT) {
+                               *i = &elem->list;
                                return verdict;
+                       }
                        goto repeat;
                }
        }
+       *i = &elem->list;
        return NF_ACCEPT;
 }
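The nf_iterate() rework keeps the externally visible contract: iteration resumes after the element stored in *i, and the cursor is written back before returning so a later call (for example after NF_REPEAT or queueing) continues from the right place. A simplified userspace analogue of that resume-from-cursor pattern, with made-up types:

#include <stdio.h>

struct hook {
	const char *name;
	int priority;
	struct hook *next;
};

/* Run hooks with priority >= thresh, starting after *cursor; on return the
 * cursor points at the last element visited so the caller can resume there. */
static int run_hooks(struct hook *head, struct hook **cursor, int thresh)
{
	struct hook *elem = *cursor ? (*cursor)->next : head;

	for (; elem; elem = elem->next) {
		if (elem->priority < thresh)
			continue;
		printf("running %s\n", elem->name);
		*cursor = elem;
		return 1;	/* verdict reached, may be re-entered later */
	}
	*cursor = NULL;		/* walked off the end */
	return 0;
}

int main(void)
{
	struct hook c = { "conntrack", 10, NULL };
	struct hook b = { "mangle", 5, &c };
	struct hook a = { "raw", 1, &b };
	struct hook *cursor = NULL;

	while (run_hooks(&a, &cursor, 3))
		;	/* resume after each verdict, like the NF_REPEAT path */
	return 0;
}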
 
index f987138..8b2cffd 100644 (file)
@@ -250,7 +250,8 @@ comment 'IPVS application helper'
 
 config IP_VS_FTP
        tristate "FTP protocol helper"
-        depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT
+       depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT && \
+               NF_CONNTRACK_FTP
        select IP_VS_NFCT
        ---help---
          FTP is a protocol that transfers IP address and/or port number in
index 64f9e8f..9713e6e 100644 (file)
@@ -180,22 +180,38 @@ register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
 }
 
 
-/*
- *     ip_vs_app registration routine
- */
-int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
+/* Register application for netns */
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
-       /* increase the module use count */
-       ip_vs_use_count_inc();
+       struct ip_vs_app *a;
+       int err = 0;
+
+       if (!ipvs)
+               return ERR_PTR(-ENOENT);
 
        mutex_lock(&__ip_vs_app_mutex);
 
-       list_add(&app->a_list, &ipvs->app_list);
+       list_for_each_entry(a, &ipvs->app_list, a_list) {
+               if (!strcmp(app->name, a->name)) {
+                       err = -EEXIST;
+                       goto out_unlock;
+               }
+       }
+       a = kmemdup(app, sizeof(*app), GFP_KERNEL);
+       if (!a) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+       INIT_LIST_HEAD(&a->incs_list);
+       list_add(&a->a_list, &ipvs->app_list);
+       /* increase the module use count */
+       ip_vs_use_count_inc();
 
+out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);
 
-       return 0;
+       return err ? ERR_PTR(err) : a;
 }
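register_ip_vs_app() now returns either the kmemdup'ed application object or an errno encoded in the returned pointer, which callers unpack with IS_ERR()/PTR_ERR() (see the ip_vs_ftp change further down). A rough userspace imitation of that single-pointer convention, with simplified stand-ins for the kernel helpers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

struct app { char name[16]; };

static struct app *register_app(const char *name)
{
	struct app *a;

	if (strlen(name) >= sizeof(a->name))
		return err_ptr(-EINVAL);	/* error travels in the pointer */
	a = calloc(1, sizeof(*a));
	if (!a)
		return err_ptr(-ENOMEM);
	strcpy(a->name, name);
	return a;
}

int main(void)
{
	struct app *a = register_app("ftp");

	if (is_err(a)) {
		fprintf(stderr, "register failed: %ld\n", ptr_err(a));
		return 1;
	}
	printf("registered %s\n", a->name);
	free(a);
	return 0;
}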
 
 
@@ -205,20 +221,29 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
  */
 void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
-       struct ip_vs_app *inc, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_app *a, *anxt, *inc, *nxt;
+
+       if (!ipvs)
+               return;
 
        mutex_lock(&__ip_vs_app_mutex);
 
-       list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-               ip_vs_app_inc_release(net, inc);
-       }
+       list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
+               if (app && strcmp(app->name, a->name))
+                       continue;
+               list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
+                       ip_vs_app_inc_release(net, inc);
+               }
 
-       list_del(&app->a_list);
+               list_del(&a->a_list);
+               kfree(a);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+               /* decrease the module use count */
+               ip_vs_use_count_dec();
+       }
 
-       /* decrease the module use count */
-       ip_vs_use_count_dec();
+       mutex_unlock(&__ip_vs_app_mutex);
 }
 
 
@@ -586,5 +611,6 @@ int __net_init ip_vs_app_net_init(struct net *net)
 
 void __net_exit ip_vs_app_net_cleanup(struct net *net)
 {
+       unregister_ip_vs_app(net, NULL /* all */);
        proc_net_remove(net, "ip_vs_app");
 }
index b54ecce..58918e2 100644 (file)
@@ -1303,7 +1303,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
        struct ip_vs_proto_data *pd;
-       unsigned int offset, ihl, verdict;
+       unsigned int offset, offset2, ihl, verdict;
+       bool ipip;
 
        *related = 1;
 
@@ -1345,6 +1346,21 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        net = skb_net(skb);
 
+       /* Special case for errors for IPIP packets */
+       ipip = false;
+       if (cih->protocol == IPPROTO_IPIP) {
+               if (unlikely(cih->frag_off & htons(IP_OFFSET)))
+                       return NF_ACCEPT;
+               /* Error for our IPIP must arrive at LOCAL_IN */
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
+                       return NF_ACCEPT;
+               offset += cih->ihl * 4;
+               cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
+               if (cih == NULL)
+                       return NF_ACCEPT; /* The packet looks wrong, ignore */
+               ipip = true;
+       }
+
        pd = ip_vs_proto_data_get(net, cih->protocol);
        if (!pd)
                return NF_ACCEPT;
@@ -1358,11 +1374,14 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
                      "Checking incoming ICMP for");
 
+       offset2 = offset;
        offset += cih->ihl * 4;
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
-       /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+       /* The embedded headers contain source and dest in reverse order.
+        * For IPIP this is error for request, not for reply.
+        */
+       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -1376,6 +1395,57 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
                goto out;
        }
 
+       if (ipip) {
+               __be32 info = ic->un.gateway;
+
+               /* Update the MTU */
+               if (ic->type == ICMP_DEST_UNREACH &&
+                   ic->code == ICMP_FRAG_NEEDED) {
+                       struct ip_vs_dest *dest = cp->dest;
+                       u32 mtu = ntohs(ic->un.frag.mtu);
+
+                       /* Strip outer IP and ICMP, go to IPIP header */
+                       __skb_pull(skb, ihl + sizeof(_icmph));
+                       offset2 -= ihl + sizeof(_icmph);
+                       skb_reset_network_header(skb);
+                       IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
+                               &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
+                       rcu_read_lock();
+                       ipv4_update_pmtu(skb, dev_net(skb->dev),
+                                        mtu, 0, 0, 0, 0);
+                       rcu_read_unlock();
+                       /* Client uses PMTUD? */
+                       if (!(cih->frag_off & htons(IP_DF)))
+                               goto ignore_ipip;
+                       /* Prefer the resulting PMTU */
+                       if (dest) {
+                               spin_lock(&dest->dst_lock);
+                               if (dest->dst_cache)
+                                       mtu = dst_mtu(dest->dst_cache);
+                               spin_unlock(&dest->dst_lock);
+                       }
+                       if (mtu > 68 + sizeof(struct iphdr))
+                               mtu -= sizeof(struct iphdr);
+                       info = htonl(mtu);
+               }
+               /* Strip outer IP, ICMP and IPIP, go to IP header of
+                * original request.
+                */
+               __skb_pull(skb, offset2);
+               skb_reset_network_header(skb);
+               IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
+                       &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+                       ic->type, ic->code, ntohl(info));
+               icmp_send(skb, ic->type, ic->code, info);
+               /* ICMP can be shorter but anyways, account it */
+               ip_vs_out_stats(cp, skb);
+
+ignore_ipip:
+               consume_skb(skb);
+               verdict = NF_STOLEN;
+               goto out;
+       }
+
        /* do the statistics and put it back */
        ip_vs_in_stats(cp, skb);
        if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
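When the embedded ICMP error is for an IPIP packet, the FRAG_NEEDED branch above relays the path MTU to the real client after subtracting the outer IPv4 header the tunnel added, and never reports less than the 68-byte IPv4 minimum. The arithmetic in isolation, assuming a 20-byte outer header with no options:

#include <stdint.h>
#include <stdio.h>

#define OUTER_IPV4_HDR 20	/* sizeof(struct iphdr), no options assumed */

static uint32_t inner_mtu_to_report(uint32_t path_mtu)
{
	if (path_mtu > 68 + OUTER_IPV4_HDR)
		return path_mtu - OUTER_IPV4_HDR;
	return path_mtu;	/* too small to trim any further */
}

int main(void)
{
	printf("path MTU 1500 -> report %u to the client\n",
	       inner_mtu_to_report(1500));	/* prints 1480 */
	return 0;
}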
index f51013c..767cc12 100644 (file)
@@ -1803,6 +1803,12 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "pmtu_disc",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
@@ -3678,7 +3684,7 @@ static void ip_vs_genl_unregister(void)
  * per netns intit/exit func.
  */
 #ifdef CONFIG_SYSCTL
-int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
        int idx;
        struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3729,6 +3735,8 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
        tbl[idx++].data = &ipvs->sysctl_sync_retries;
        tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+       ipvs->sysctl_pmtu_disc = 1;
+       tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
 
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
@@ -3746,7 +3754,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        return 0;
 }
 
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3757,8 +3765,8 @@ void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 
 #else
 
-int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
index b20b29c..ad70b7e 100644 (file)
@@ -441,16 +441,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
 
        if (!ipvs)
                return -ENOENT;
-       app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
-       if (!app)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&app->a_list);
-       INIT_LIST_HEAD(&app->incs_list);
-       ipvs->ftp_app = app;
 
-       ret = register_ip_vs_app(net, app);
-       if (ret)
-               goto err_exit;
+       app = register_ip_vs_app(net, &ip_vs_ftp);
+       if (IS_ERR(app))
+               return PTR_ERR(app);
 
        for (i = 0; i < ports_count; i++) {
                if (!ports[i])
@@ -464,9 +458,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
        return 0;
 
 err_unreg:
-       unregister_ip_vs_app(net, app);
-err_exit:
-       kfree(ipvs->ftp_app);
+       unregister_ip_vs_app(net, &ip_vs_ftp);
        return ret;
 }
 /*
@@ -474,10 +466,7 @@ err_exit:
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-       struct netns_ipvs *ipvs = net_ipvs(net);
-
-       unregister_ip_vs_app(net, ipvs->ftp_app);
-       kfree(ipvs->ftp_app);
+       unregister_ip_vs_app(net, &ip_vs_ftp);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
index 65b616a..543a554 100644 (file)
@@ -49,6 +49,7 @@ enum {
        IP_VS_RT_MODE_RDR       = 4, /* Allow redirect from remote daddr to
                                      * local
                                      */
+       IP_VS_RT_MODE_CONNECT   = 8, /* Always bind route to saddr */
 };
 
 /*
@@ -84,6 +85,42 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
        return dst;
 }
 
+/* Get route to daddr, update *saddr, optionally bind route to saddr */
+static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+                                      u32 rtos, int rt_mode, __be32 *saddr)
+{
+       struct flowi4 fl4;
+       struct rtable *rt;
+       int loop = 0;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.daddr = daddr;
+       fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+       fl4.flowi4_tos = rtos;
+
+retry:
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt)) {
+               /* Invalid saddr ? */
+               if (PTR_ERR(rt) == -EINVAL && *saddr &&
+                   rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+                       *saddr = 0;
+                       flowi4_update_output(&fl4, 0, rtos, daddr, 0);
+                       goto retry;
+               }
+               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+               return NULL;
+       } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+               ip_rt_put(rt);
+               *saddr = fl4.saddr;
+               flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr);
+               loop++;
+               goto retry;
+       }
+       *saddr = fl4.saddr;
+       return rt;
+}
+
 /* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -98,20 +135,13 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                spin_lock(&dest->dst_lock);
                if (!(rt = (struct rtable *)
                      __ip_vs_dst_check(dest, rtos))) {
-                       struct flowi4 fl4;
-
-                       memset(&fl4, 0, sizeof(fl4));
-                       fl4.daddr = dest->addr.ip;
-                       fl4.flowi4_tos = rtos;
-                       rt = ip_route_output_key(net, &fl4);
-                       if (IS_ERR(rt)) {
+                       rt = do_output_route4(net, dest->addr.ip, rtos,
+                                             rt_mode, &dest->dst_saddr.ip);
+                       if (!rt) {
                                spin_unlock(&dest->dst_lock);
-                               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-                                            &dest->addr.ip);
                                return NULL;
                        }
                        __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
-                       dest->dst_saddr.ip = fl4.saddr;
                        IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
                                  "rtos=%X\n",
                                  &dest->addr.ip, &dest->dst_saddr.ip,
@@ -122,19 +152,17 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                        *ret_saddr = dest->dst_saddr.ip;
                spin_unlock(&dest->dst_lock);
        } else {
-               struct flowi4 fl4;
+               __be32 saddr = htonl(INADDR_ANY);
 
-               memset(&fl4, 0, sizeof(fl4));
-               fl4.daddr = daddr;
-               fl4.flowi4_tos = rtos;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt)) {
-                       IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-                                    &daddr);
+               /* For such unconfigured boxes avoid many route lookups
+                * for performance reasons because we do not remember saddr
+                */
+               rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+               rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr);
+               if (!rt)
                        return NULL;
-               }
                if (ret_saddr)
-                       *ret_saddr = fl4.saddr;
+                       *ret_saddr = saddr;
        }
 
        local = rt->rt_flags & RTCF_LOCAL;
@@ -331,6 +359,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
        old_dst = dest->dst_cache;
        dest->dst_cache = NULL;
        dst_release(old_dst);
+       dest->dst_saddr.ip = 0;
 }
 
 #define IP_VS_XMIT_TUNNEL(skb, cp)                             \
@@ -766,12 +795,13 @@ int
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
        struct rtable *rt;                      /* Route to the other host */
        __be32 saddr;                           /* Source for tunnel */
        struct net_device *tdev;                /* Device to other host */
        struct iphdr  *old_iph = ip_hdr(skb);
        u8     tos = old_iph->tos;
-       __be16 df = old_iph->frag_off;
+       __be16 df;
        struct iphdr  *iph;                     /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        int    mtu;
@@ -781,7 +811,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
                                      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
-                                                  IP_VS_RT_MODE_NON_LOCAL,
+                                                  IP_VS_RT_MODE_NON_LOCAL |
+                                                  IP_VS_RT_MODE_CONNECT,
                                                   &saddr)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
@@ -796,13 +827,13 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
                goto tx_error_put;
        }
-       if (skb_dst(skb))
+       if (rt_is_output_route(skb_rtable(skb)))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-       df |= (old_iph->frag_off & htons(IP_DF));
+       /* Copy DF, reset fragment offset and MF */
+       df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
 
-       if ((old_iph->frag_off & htons(IP_DF) &&
-           mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
+       if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
index 0dc6385..51e928d 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/notifier.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
@@ -294,9 +293,7 @@ void nf_conntrack_l3proto_unregister(struct net *net,
        nf_ct_l3proto_unregister_sysctl(net, proto);
 
        /* Remove all contrack entries for this protocol */
-       rtnl_lock();
        nf_ct_iterate_cleanup(net, kill_l3proto, proto);
-       rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
 
@@ -502,9 +499,7 @@ void nf_conntrack_l4proto_unregister(struct net *net,
        nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 
        /* Remove all conntrack entries for this protocol */
-       rtnl_lock();
        nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
-       rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
 
index b2e7310..d7ec928 100644 (file)
@@ -79,11 +79,11 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
 
        if (tb[NFACCT_BYTES]) {
                atomic64_set(&nfacct->bytes,
-                            be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+                            be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
        }
        if (tb[NFACCT_PKTS]) {
                atomic64_set(&nfacct->pkts,
-                            be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+                            be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
        }
        atomic_set(&nfacct->refcnt, 1);
        list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
index d683619..32a1ba3 100644 (file)
@@ -74,7 +74,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
        if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
                return -EINVAL;
 
-       tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+       tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
        tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
 
        return 0;
index 14e2f39..be194b1 100644 (file)
@@ -55,6 +55,7 @@ struct nfulnl_instance {
        unsigned int qlen;              /* number of nlmsgs in skb */
        struct sk_buff *skb;            /* pre-allocated skb */
        struct timer_list timer;
+       struct user_namespace *peer_user_ns;    /* User namespace of the peer process */
        int peer_pid;                   /* PID of the peer process */
 
        /* configurable parameters */
@@ -132,7 +133,7 @@ instance_put(struct nfulnl_instance *inst)
 static void nfulnl_timer(unsigned long data);
 
 static struct nfulnl_instance *
-instance_create(u_int16_t group_num, int pid)
+instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
 {
        struct nfulnl_instance *inst;
        int err;
@@ -162,6 +163,7 @@ instance_create(u_int16_t group_num, int pid)
 
        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
+       inst->peer_user_ns = user_ns;
        inst->peer_pid = pid;
        inst->group_num = group_num;
 
@@ -503,8 +505,11 @@ __build_packet_message(struct nfulnl_instance *inst,
                read_lock_bh(&skb->sk->sk_callback_lock);
                if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
                        struct file *file = skb->sk->sk_socket->file;
-                       __be32 uid = htonl(file->f_cred->fsuid);
-                       __be32 gid = htonl(file->f_cred->fsgid);
+                       __be32 uid = htonl(from_kuid_munged(inst->peer_user_ns,
+                                                           file->f_cred->fsuid));
+                       __be32 gid = htonl(from_kgid_munged(inst->peer_user_ns,
+                                                           file->f_cred->fsgid));
+                       /* unlock before nla_put below, which may jump to the failure label */
                        read_unlock_bh(&skb->sk->sk_callback_lock);
                        if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
                            nla_put_be32(inst->skb, NFULA_GID, gid))
@@ -783,7 +788,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                        }
 
                        inst = instance_create(group_num,
-                                              NETLINK_CB(skb).pid);
+                                              NETLINK_CB(skb).pid,
+                                              sk_user_ns(NETLINK_CB(skb).ssk));
                        if (IS_ERR(inst)) {
                                ret = PTR_ERR(inst);
                                goto out;
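
Taken together, the two hunks above capture the configuring listener's user namespace when the log instance is created and use it later when encoding credentials into log messages. A condensed sketch of that pattern, with hypothetical names (log_instance, log_instance_bind, log_encode_uid) used purely for illustration:

struct log_instance {
	struct user_namespace *peer_user_ns;	/* namespace of the userspace listener */
};

/* configuration path: remember which user namespace the listener lives in */
static void log_instance_bind(struct log_instance *li, struct sk_buff *cfg_skb)
{
	li->peer_user_ns = sk_user_ns(NETLINK_CB(cfg_skb).ssk);
}

/* logging path: express a kernel-internal kuid_t as a uid valid for that listener */
static __be32 log_encode_uid(const struct log_instance *li, kuid_t fsuid)
{
	return htonl(from_kuid_munged(li->peer_user_ns, fsuid));
}
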
index ff5f75f..02a2bf4 100644 (file)
@@ -363,10 +363,12 @@ static void dump_ipv4_packet(struct sbuff *m,
        /* Max length: 15 "UID=4294967295 " */
        if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
                read_lock_bh(&skb->sk->sk_callback_lock);
-               if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+               if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
+                       const struct cred *cred = skb->sk->sk_socket->file->f_cred;
                        sb_add(m, "UID=%u GID=%u ",
-                               skb->sk->sk_socket->file->f_cred->fsuid,
-                               skb->sk->sk_socket->file->f_cred->fsgid);
+                               from_kuid_munged(&init_user_ns, cred->fsuid),
+                               from_kgid_munged(&init_user_ns, cred->fsgid));
+               }
                read_unlock_bh(&skb->sk->sk_callback_lock);
        }
 
@@ -719,10 +721,12 @@ static void dump_ipv6_packet(struct sbuff *m,
        /* Max length: 15 "UID=4294967295 " */
        if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
                read_lock_bh(&skb->sk->sk_callback_lock);
-               if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+               if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
+                       const struct cred *cred = skb->sk->sk_socket->file->f_cred;
                        sb_add(m, "UID=%u GID=%u ",
-                               skb->sk->sk_socket->file->f_cred->fsuid,
-                               skb->sk->sk_socket->file->f_cred->fsgid);
+                               from_kuid_munged(&init_user_ns, cred->fsuid),
+                               from_kgid_munged(&init_user_ns, cred->fsgid));
+               }
                read_unlock_bh(&skb->sk->sk_callback_lock);
        }
 
index 7babe7d..817f9e9 100644 (file)
@@ -43,7 +43,7 @@ static u32 hash_v4(const struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
 
        /* packets in either direction go into the same queue */
-       if (iph->saddr < iph->daddr)
+       if ((__force u32)iph->saddr < (__force u32)iph->daddr)
                return jhash_3words((__force u32)iph->saddr,
                        (__force u32)iph->daddr, iph->protocol, jhash_initval);
 
@@ -57,7 +57,8 @@ static u32 hash_v6(const struct sk_buff *skb)
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        u32 a, b, c;
 
-       if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+       if ((__force u32)ip6h->saddr.s6_addr32[3] <
+           (__force u32)ip6h->daddr.s6_addr32[3]) {
                a = (__force u32) ip6h->saddr.s6_addr32[3];
                b = (__force u32) ip6h->daddr.s6_addr32[3];
        } else {
@@ -65,7 +66,8 @@ static u32 hash_v6(const struct sk_buff *skb)
                a = (__force u32) ip6h->daddr.s6_addr32[3];
        }
 
-       if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+       if ((__force u32)ip6h->saddr.s6_addr32[1] <
+           (__force u32)ip6h->daddr.s6_addr32[1])
                c = (__force u32) ip6h->saddr.s6_addr32[1];
        else
                c = (__force u32) ip6h->daddr.s6_addr32[1];
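
These casts only address sparse's endianness checking: the __be32 values are compared purely to pick a stable (a, b) ordering for the hash, so comparing the raw bit patterns is safe. A small sketch of the same idea in isolation (the helper name is illustrative):

#include <linux/jhash.h>

/* hash a (saddr, daddr) pair so both directions of a flow land in the same
 * bucket; the raw-bit comparison only canonicalises the ordering, so the
 * __force annotation is legitimate and keeps sparse quiet */
static u32 canonical_pair_hash(__be32 saddr, __be32 daddr, u8 proto, u32 initval)
{
	if ((__force u32)saddr < (__force u32)daddr)
		return jhash_3words((__force u32)saddr, (__force u32)daddr,
				    proto, initval);

	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    proto, initval);
}
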
index 846f895..a5e673d 100644 (file)
@@ -269,7 +269,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
                                                mss <<= 8;
                                                mss |= optp[2];
 
-                                               mss = ntohs(mss);
+                                               mss = ntohs((__force __be16)mss);
                                                break;
                                        case OSFOPT_TS:
                                                loop_cont = 1;
index 772d738..ca2e577 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_owner.h>
 
+static int owner_check(const struct xt_mtchk_param *par)
+{
+       struct xt_owner_match_info *info = par->matchinfo;
+
+       /* For now only allow adding matches from the initial user namespace */
+       if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
+           (current_user_ns() != &init_user_ns))
+               return -EINVAL;
+       return 0;
+}
+
 static bool
 owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -37,17 +48,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
                return ((info->match ^ info->invert) &
                       (XT_OWNER_UID | XT_OWNER_GID)) == 0;
 
-       if (info->match & XT_OWNER_UID)
-               if ((filp->f_cred->fsuid >= info->uid_min &&
-                   filp->f_cred->fsuid <= info->uid_max) ^
+       if (info->match & XT_OWNER_UID) {
+               kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
+               kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
+               if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+                    uid_lte(filp->f_cred->fsuid, uid_max)) ^
                    !(info->invert & XT_OWNER_UID))
                        return false;
+       }
 
-       if (info->match & XT_OWNER_GID)
-               if ((filp->f_cred->fsgid >= info->gid_min &&
-                   filp->f_cred->fsgid <= info->gid_max) ^
+       if (info->match & XT_OWNER_GID) {
+               kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
+               kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
+               if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
+                    gid_lte(filp->f_cred->fsgid, gid_max)) ^
                    !(info->invert & XT_OWNER_GID))
                        return false;
+       }
 
        return true;
 }
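
With uids represented as kuid_t, the configured range (still plain numbers interpreted in the initial namespace) has to be converted with make_kuid() and compared with the dedicated helpers. A self-contained sketch of that comparison (uid_in_range is an illustrative helper, not part of the module):

#include <linux/uidgid.h>

static bool uid_in_range(kuid_t fsuid, u32 min, u32 max)
{
	kuid_t kmin = make_kuid(&init_user_ns, min);
	kuid_t kmax = make_kuid(&init_user_ns, max);

	/* values without a mapping come back as INVALID_UID */
	if (!uid_valid(kmin) || !uid_valid(kmax))
		return false;

	return uid_gte(fsuid, kmin) && uid_lte(fsuid, kmax);
}
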
@@ -56,6 +73,7 @@ static struct xt_match owner_mt_reg __read_mostly = {
        .name       = "owner",
        .revision   = 1,
        .family     = NFPROTO_UNSPEC,
+       .checkentry = owner_check,
        .match      = owner_mt,
        .matchsize  = sizeof(struct xt_owner_match_info),
        .hooks      = (1 << NF_INET_LOCAL_OUT) |
index ae2ad1e..4635c9b 100644 (file)
@@ -317,6 +317,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        struct recent_table *t;
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry *pde;
+       kuid_t uid;
+       kgid_t gid;
 #endif
        unsigned int i;
        int ret = -EINVAL;
@@ -372,6 +374,13 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        for (i = 0; i < ip_list_hash_size; i++)
                INIT_LIST_HEAD(&t->iphash[i]);
 #ifdef CONFIG_PROC_FS
+       uid = make_kuid(&init_user_ns, ip_list_uid);
+       gid = make_kgid(&init_user_ns, ip_list_gid);
+       if (!uid_valid(uid) || !gid_valid(gid)) {
+               kfree(t);
+               ret = -EINVAL;
+               goto out;
+       }
        pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
                  &recent_mt_fops, t);
        if (pde == NULL) {
@@ -379,8 +388,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
                ret = -ENOMEM;
                goto out;
        }
-       pde->uid = ip_list_uid;
-       pde->gid = ip_list_gid;
+       pde->uid = uid;
+       pde->gid = gid;
 #endif
        spin_lock_bh(&recent_lock);
        list_add_tail(&t->list, &recent_net->tables);
index 5270238..3821199 100644 (file)
@@ -912,7 +912,8 @@ static void netlink_rcv_wake(struct sock *sk)
                wake_up_interruptible(&nlk->wait);
 }
 
-static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
+                                 struct sock *ssk)
 {
        int ret;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -921,6 +922,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
        if (nlk->netlink_rcv != NULL) {
                ret = skb->len;
                skb_set_owner_r(skb, sk);
+               NETLINK_CB(skb).ssk = ssk;
                nlk->netlink_rcv(skb);
                consume_skb(skb);
        } else {
@@ -947,7 +949,7 @@ retry:
                return PTR_ERR(sk);
        }
        if (netlink_is_kernel(sk))
-               return netlink_unicast_kernel(sk, skb);
+               return netlink_unicast_kernel(sk, skb, ssk);
 
        if (sk_filter(sk, skb)) {
                err = skb->len;
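
Recording the sending socket in NETLINK_CB(skb).ssk lets any kernel-side netlink_rcv handler discover which user namespace a request came from. A minimal sketch of a consumer, under the assumption that the handler simply ignores requests from other namespaces (sample_netlink_rcv is hypothetical):

static void sample_netlink_rcv(struct sk_buff *skb)
{
	/* ssk is the socket the request was sent from, filled in above */
	if (sk_user_ns(NETLINK_CB(skb).ssk) != &init_user_ns)
		return;	/* only honour requests from the initial user namespace */

	/* ... parse and handle the message ... */
}
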
index 0060e3b..cc55b35 100644 (file)
@@ -14,3 +14,11 @@ config PACKET
          be called af_packet.
 
          If unsure, say Y.
+
+config PACKET_DIAG
+       tristate "Packet: sockets monitoring interface"
+       depends on PACKET
+       default n
+       ---help---
+         Support for PF_PACKET sockets monitoring interface used by the ss tool.
+         If unsure, say Y.
index 81183ea..9df6134 100644 (file)
@@ -3,3 +3,5 @@
 #
 
 obj-$(CONFIG_PACKET) += af_packet.o
+obj-$(CONFIG_PACKET_DIAG) += af_packet_diag.o
+af_packet_diag-y += diag.o
index c5c9e2a..94060ed 100644 (file)
@@ -93,6 +93,8 @@
 #include <net/inet_common.h>
 #endif
 
+#include "internal.h"
+
 /*
    Assumptions:
    - if device has no dev->hard_header routine, it adds and removes ll header
@@ -146,14 +148,6 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
 
 /* Private packet socket structures. */
 
-struct packet_mclist {
-       struct packet_mclist    *next;
-       int                     ifindex;
-       int                     count;
-       unsigned short          type;
-       unsigned short          alen;
-       unsigned char           addr[MAX_ADDR_LEN];
-};
 /* identical to struct packet_mreq except it has
  * a longer address field.
  */
@@ -175,63 +169,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 #define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 
-/* kbdq - kernel block descriptor queue */
-struct tpacket_kbdq_core {
-       struct pgv      *pkbdq;
-       unsigned int    feature_req_word;
-       unsigned int    hdrlen;
-       unsigned char   reset_pending_on_curr_blk;
-       unsigned char   delete_blk_timer;
-       unsigned short  kactive_blk_num;
-       unsigned short  blk_sizeof_priv;
-
-       /* last_kactive_blk_num:
-        * trick to see if user-space has caught up
-        * in order to avoid refreshing timer when every single pkt arrives.
-        */
-       unsigned short  last_kactive_blk_num;
-
-       char            *pkblk_start;
-       char            *pkblk_end;
-       int             kblk_size;
-       unsigned int    knum_blocks;
-       uint64_t        knxt_seq_num;
-       char            *prev;
-       char            *nxt_offset;
-       struct sk_buff  *skb;
-
-       atomic_t        blk_fill_in_prog;
-
-       /* Default is set to 8ms */
-#define DEFAULT_PRB_RETIRE_TOV (8)
-
-       unsigned short  retire_blk_tov;
-       unsigned short  version;
-       unsigned long   tov_in_jiffies;
-
-       /* timer to retire an outstanding block */
-       struct timer_list retire_blk_timer;
-};
-
 #define PGV_FROM_VMALLOC 1
-struct pgv {
-       char *buffer;
-};
-
-struct packet_ring_buffer {
-       struct pgv              *pg_vec;
-       unsigned int            head;
-       unsigned int            frames_per_block;
-       unsigned int            frame_size;
-       unsigned int            frame_max;
-
-       unsigned int            pg_vec_order;
-       unsigned int            pg_vec_pages;
-       unsigned int            pg_vec_len;
-
-       struct tpacket_kbdq_core        prb_bdqc;
-       atomic_t                pending;
-};
 
 #define BLOCK_STATUS(x)        ((x)->hdr.bh1.block_status)
 #define BLOCK_NUM_PKTS(x)      ((x)->hdr.bh1.num_pkts)
@@ -269,52 +207,6 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
 
-struct packet_fanout;
-struct packet_sock {
-       /* struct sock has to be the first member of packet_sock */
-       struct sock             sk;
-       struct packet_fanout    *fanout;
-       struct tpacket_stats    stats;
-       union  tpacket_stats_u  stats_u;
-       struct packet_ring_buffer       rx_ring;
-       struct packet_ring_buffer       tx_ring;
-       int                     copy_thresh;
-       spinlock_t              bind_lock;
-       struct mutex            pg_vec_lock;
-       unsigned int            running:1,      /* prot_hook is attached*/
-                               auxdata:1,
-                               origdev:1,
-                               has_vnet_hdr:1;
-       int                     ifindex;        /* bound device         */
-       __be16                  num;
-       struct packet_mclist    *mclist;
-       atomic_t                mapped;
-       enum tpacket_versions   tp_version;
-       unsigned int            tp_hdrlen;
-       unsigned int            tp_reserve;
-       unsigned int            tp_loss:1;
-       unsigned int            tp_tstamp;
-       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-};
-
-#define PACKET_FANOUT_MAX      256
-
-struct packet_fanout {
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
-       unsigned int            num_members;
-       u16                     id;
-       u8                      type;
-       u8                      defrag;
-       atomic_t                rr_cur;
-       struct list_head        list;
-       struct sock             *arr[PACKET_FANOUT_MAX];
-       spinlock_t              lock;
-       atomic_t                sk_ref;
-       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-};
-
 struct packet_skb_cb {
        unsigned int origlen;
        union {
@@ -334,11 +226,6 @@ struct packet_skb_cb {
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)
 
-static struct packet_sock *pkt_sk(struct sock *sk)
-{
-       return (struct packet_sock *)sk;
-}
-
 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
@@ -968,7 +855,8 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
                ppd->tp_status = TP_STATUS_VLAN_VALID;
        } else {
-               ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
+               ppd->hv1.tp_vlan_tci = 0;
+               ppd->tp_status = TP_STATUS_AVAILABLE;
        }
 }
 
@@ -1243,7 +1131,8 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
-static DEFINE_MUTEX(fanout_mutex);
+DEFINE_MUTEX(fanout_mutex);
+EXPORT_SYMBOL_GPL(fanout_mutex);
 static LIST_HEAD(fanout_list);
 
 static void __fanout_link(struct sock *sk, struct packet_sock *po)
@@ -1364,9 +1253,9 @@ static void fanout_release(struct sock *sk)
        if (!f)
                return;
 
+       mutex_lock(&fanout_mutex);
        po->fanout = NULL;
 
-       mutex_lock(&fanout_mutex);
        if (atomic_dec_and_test(&f->sk_ref)) {
                list_del(&f->list);
                dev_remove_pack(&f->prot_hook);
@@ -2063,7 +1952,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        int tp_len, size_max;
        unsigned char *addr;
        int len_sum = 0;
-       int status = 0;
+       int status = TP_STATUS_AVAILABLE;
        int hlen, tlen;
 
        mutex_lock(&po->pg_vec_lock);
@@ -2428,10 +2317,13 @@ static int packet_release(struct socket *sock)
        net = sock_net(sk);
        po = pkt_sk(sk);
 
-       spin_lock_bh(&net->packet.sklist_lock);
+       mutex_lock(&net->packet.sklist_lock);
        sk_del_node_init_rcu(sk);
+       mutex_unlock(&net->packet.sklist_lock);
+
+       preempt_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
-       spin_unlock_bh(&net->packet.sklist_lock);
+       preempt_enable();
 
        spin_lock(&po->bind_lock);
        unregister_prot_hook(sk, false);
@@ -2630,10 +2522,13 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
                register_prot_hook(sk);
        }
 
-       spin_lock_bh(&net->packet.sklist_lock);
+       mutex_lock(&net->packet.sklist_lock);
        sk_add_node_rcu(sk, &net->packet.sklist);
+       mutex_unlock(&net->packet.sklist_lock);
+
+       preempt_disable();
        sock_prot_inuse_add(net, &packet_proto, 1);
-       spin_unlock_bh(&net->packet.sklist_lock);
+       preempt_enable();
 
        return 0;
 out:
@@ -3854,7 +3749,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
                           po->ifindex,
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
-                          sock_i_uid(s),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
                           sock_i_ino(s));
        }
 
@@ -3886,7 +3781,7 @@ static const struct file_operations packet_seq_fops = {
 
 static int __net_init packet_net_init(struct net *net)
 {
-       spin_lock_init(&net->packet.sklist_lock);
+       mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);
 
        if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
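
The socket-list lock becomes a mutex so that sleeping walkers (the /proc handler and the new diag dumper) can traverse the list, while the per-cpu protocol "inuse" counter only needs preemption disabled around the update. A condensed sketch of the resulting add/remove pattern (packet_sock_track is an illustrative helper folding the two hunks above into one place):

static void packet_sock_track(struct net *net, struct sock *sk, int delta)
{
	mutex_lock(&net->packet.sklist_lock);
	if (delta > 0)
		sk_add_node_rcu(sk, &net->packet.sklist);
	else
		sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	/* sock_prot_inuse_add() touches a per-cpu counter, so disabling
	 * preemption is enough; no spinlock is needed any more */
	preempt_disable();
	sock_prot_inuse_add(net, sk->sk_prot, delta);
	preempt_enable();
}
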
diff --git a/net/packet/diag.c b/net/packet/diag.c
new file mode 100644 (file)
index 0000000..39bce0d
--- /dev/null
@@ -0,0 +1,242 @@
+#include <linux/module.h>
+#include <linux/sock_diag.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/packet_diag.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#include "internal.h"
+
+static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+       struct packet_diag_info pinfo;
+
+       pinfo.pdi_index = po->ifindex;
+       pinfo.pdi_version = po->tp_version;
+       pinfo.pdi_reserve = po->tp_reserve;
+       pinfo.pdi_copy_thresh = po->copy_thresh;
+       pinfo.pdi_tstamp = po->tp_tstamp;
+
+       pinfo.pdi_flags = 0;
+       if (po->running)
+               pinfo.pdi_flags |= PDI_RUNNING;
+       if (po->auxdata)
+               pinfo.pdi_flags |= PDI_AUXDATA;
+       if (po->origdev)
+               pinfo.pdi_flags |= PDI_ORIGDEV;
+       if (po->has_vnet_hdr)
+               pinfo.pdi_flags |= PDI_VNETHDR;
+       if (po->tp_loss)
+               pinfo.pdi_flags |= PDI_LOSS;
+
+       return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
+}
+
+static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+       struct nlattr *mca;
+       struct packet_mclist *ml;
+
+       mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
+       if (!mca)
+               return -EMSGSIZE;
+
+       rtnl_lock();
+       for (ml = po->mclist; ml; ml = ml->next) {
+               struct packet_diag_mclist *dml;
+
+               dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
+               if (!dml) {
+                       rtnl_unlock();
+                       nla_nest_cancel(nlskb, mca);
+                       return -EMSGSIZE;
+               }
+
+               dml->pdmc_index = ml->ifindex;
+               dml->pdmc_type = ml->type;
+               dml->pdmc_alen = ml->alen;
+               dml->pdmc_count = ml->count;
+               BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
+               memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
+       }
+
+       rtnl_unlock();
+       nla_nest_end(nlskb, mca);
+
+       return 0;
+}
+
+static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
+               struct sk_buff *nlskb)
+{
+       struct packet_diag_ring pdr;
+
+       if (!ring->pg_vec || ((ver > TPACKET_V2) &&
+                               (nl_type == PACKET_DIAG_TX_RING)))
+               return 0;
+
+       pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
+       pdr.pdr_block_nr = ring->pg_vec_len;
+       pdr.pdr_frame_size = ring->frame_size;
+       pdr.pdr_frame_nr = ring->frame_max + 1;
+
+       if (ver > TPACKET_V2) {
+               pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
+               pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
+               pdr.pdr_features = ring->prb_bdqc.feature_req_word;
+       } else {
+               pdr.pdr_retire_tmo = 0;
+               pdr.pdr_sizeof_priv = 0;
+               pdr.pdr_features = 0;
+       }
+
+       return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
+}
+
+static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
+{
+       int ret;
+
+       mutex_lock(&po->pg_vec_lock);
+       ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
+                       PACKET_DIAG_RX_RING, skb);
+       if (!ret)
+               ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
+                               PACKET_DIAG_TX_RING, skb);
+       mutex_unlock(&po->pg_vec_lock);
+
+       return ret;
+}
+
+static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
+{
+       int ret = 0;
+
+       mutex_lock(&fanout_mutex);
+       if (po->fanout) {
+               u32 val;
+
+               val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
+               ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
+       }
+       mutex_unlock(&fanout_mutex);
+
+       return ret;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct packet_diag_req *req,
+               u32 pid, u32 seq, u32 flags, int sk_ino)
+{
+       struct nlmsghdr *nlh;
+       struct packet_diag_msg *rp;
+       struct packet_sock *po = pkt_sk(sk);
+
+       nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       rp = nlmsg_data(nlh);
+       rp->pdiag_family = AF_PACKET;
+       rp->pdiag_type = sk->sk_type;
+       rp->pdiag_num = ntohs(po->num);
+       rp->pdiag_ino = sk_ino;
+       sock_diag_save_cookie(sk, rp->pdiag_cookie);
+
+       if ((req->pdiag_show & PACKET_SHOW_INFO) &&
+                       pdiag_put_info(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
+                       pdiag_put_mclist(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
+                       pdiag_put_rings_cfg(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
+                       pdiag_put_fanout(po, skb))
+               goto out_nlmsg_trim;
+
+       return nlmsg_end(skb, nlh);
+
+out_nlmsg_trim:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       int num = 0, s_num = cb->args[0];
+       struct packet_diag_req *req;
+       struct net *net;
+       struct sock *sk;
+       struct hlist_node *node;
+
+       net = sock_net(skb->sk);
+       req = nlmsg_data(cb->nlh);
+
+       mutex_lock(&net->packet.sklist_lock);
+       sk_for_each(sk, node, &net->packet.sklist) {
+               if (!net_eq(sock_net(sk), net))
+                       continue;
+               if (num < s_num)
+                       goto next;
+
+               if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).pid,
+                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                       sock_i_ino(sk)) < 0)
+                       goto done;
+next:
+               num++;
+       }
+done:
+       mutex_unlock(&net->packet.sklist_lock);
+       cb->args[0] = num;
+
+       return skb->len;
+}
+
+static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+       int hdrlen = sizeof(struct packet_diag_req);
+       struct net *net = sock_net(skb->sk);
+       struct packet_diag_req *req;
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       req = nlmsg_data(h);
+       /* Make it possible to support protocol filtering later */
+       if (req->sdiag_protocol)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = packet_diag_dump,
+               };
+               return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+       } else
+               return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler packet_diag_handler = {
+       .family = AF_PACKET,
+       .dump = packet_diag_handler_dump,
+};
+
+static int __init packet_diag_init(void)
+{
+       return sock_diag_register(&packet_diag_handler);
+}
+
+static void __exit packet_diag_exit(void)
+{
+       sock_diag_unregister(&packet_diag_handler);
+}
+
+module_init(packet_diag_init);
+module_exit(packet_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
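
For reference, the new family can be exercised from userspace over NETLINK_SOCK_DIAG. The sketch below sends a dump request for all AF_PACKET sockets; it is illustrative only, keeps error handling minimal, and assumes the uapi header <linux/packet_diag.h> introduced alongside this file is installed:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/packet_diag.h>

static int request_packet_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct packet_diag_req req;
	} msg;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_PACKET;
	msg.req.pdiag_show = PACKET_SHOW_INFO | PACKET_SHOW_MCLIST;

	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}

	/* replies carry struct packet_diag_msg plus the requested attributes
	 * and would be read back with recv() on the returned fd */
	return fd;
}
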
diff --git a/net/packet/internal.h b/net/packet/internal.h
new file mode 100644 (file)
index 0000000..44945f6
--- /dev/null
@@ -0,0 +1,121 @@
+#ifndef __PACKET_INTERNAL_H__
+#define __PACKET_INTERNAL_H__
+
+struct packet_mclist {
+       struct packet_mclist    *next;
+       int                     ifindex;
+       int                     count;
+       unsigned short          type;
+       unsigned short          alen;
+       unsigned char           addr[MAX_ADDR_LEN];
+};
+
+/* kbdq - kernel block descriptor queue */
+struct tpacket_kbdq_core {
+       struct pgv      *pkbdq;
+       unsigned int    feature_req_word;
+       unsigned int    hdrlen;
+       unsigned char   reset_pending_on_curr_blk;
+       unsigned char   delete_blk_timer;
+       unsigned short  kactive_blk_num;
+       unsigned short  blk_sizeof_priv;
+
+       /* last_kactive_blk_num:
+        * trick to see if user-space has caught up
+        * in order to avoid refreshing the timer when every single pkt arrives.
+        */
+       unsigned short  last_kactive_blk_num;
+
+       char            *pkblk_start;
+       char            *pkblk_end;
+       int             kblk_size;
+       unsigned int    knum_blocks;
+       uint64_t        knxt_seq_num;
+       char            *prev;
+       char            *nxt_offset;
+       struct sk_buff  *skb;
+
+       atomic_t        blk_fill_in_prog;
+
+       /* Default is set to 8ms */
+#define DEFAULT_PRB_RETIRE_TOV (8)
+
+       unsigned short  retire_blk_tov;
+       unsigned short  version;
+       unsigned long   tov_in_jiffies;
+
+       /* timer to retire an outstanding block */
+       struct timer_list retire_blk_timer;
+};
+
+struct pgv {
+       char *buffer;
+};
+
+struct packet_ring_buffer {
+       struct pgv              *pg_vec;
+       unsigned int            head;
+       unsigned int            frames_per_block;
+       unsigned int            frame_size;
+       unsigned int            frame_max;
+
+       unsigned int            pg_vec_order;
+       unsigned int            pg_vec_pages;
+       unsigned int            pg_vec_len;
+
+       struct tpacket_kbdq_core        prb_bdqc;
+       atomic_t                pending;
+};
+
+extern struct mutex fanout_mutex;
+#define PACKET_FANOUT_MAX      256
+
+struct packet_fanout {
+#ifdef CONFIG_NET_NS
+       struct net              *net;
+#endif
+       unsigned int            num_members;
+       u16                     id;
+       u8                      type;
+       u8                      defrag;
+       atomic_t                rr_cur;
+       struct list_head        list;
+       struct sock             *arr[PACKET_FANOUT_MAX];
+       spinlock_t              lock;
+       atomic_t                sk_ref;
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+};
+
+struct packet_sock {
+       /* struct sock has to be the first member of packet_sock */
+       struct sock             sk;
+       struct packet_fanout    *fanout;
+       struct tpacket_stats    stats;
+       union  tpacket_stats_u  stats_u;
+       struct packet_ring_buffer       rx_ring;
+       struct packet_ring_buffer       tx_ring;
+       int                     copy_thresh;
+       spinlock_t              bind_lock;
+       struct mutex            pg_vec_lock;
+       unsigned int            running:1,      /* prot_hook is attached */
+                               auxdata:1,
+                               origdev:1,
+                               has_vnet_hdr:1;
+       int                     ifindex;        /* bound device         */
+       __be16                  num;
+       struct packet_mclist    *mclist;
+       atomic_t                mapped;
+       enum tpacket_versions   tp_version;
+       unsigned int            tp_hdrlen;
+       unsigned int            tp_reserve;
+       unsigned int            tp_loss:1;
+       unsigned int            tp_tstamp;
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+};
+
+static struct packet_sock *pkt_sk(struct sock *sk)
+{
+       return (struct packet_sock *)sk;
+}
+
+#endif
index 0acc943..b7e9827 100644 (file)
@@ -612,7 +612,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
                        sk->sk_protocol, pn->sobject, pn->dobject,
                        pn->resource, sk->sk_state,
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
-                       sock_i_uid(sk), sock_i_ino(sk),
+                       from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                       sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
                        atomic_read(&sk->sk_drops), &len);
        }
@@ -796,7 +797,8 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
                struct sock *sk = *psk;
 
                seq_printf(seq, "%02X %5d %lu%n",
-                          (int) (psk - pnres.sk), sock_i_uid(sk),
+                          (int) (psk - pnres.sk),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                           sock_i_ino(sk), &len);
        }
        seq_printf(seq, "%*s\n", 63 - len, "");
index af95c8e..a65ee78 100644 (file)
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        state_change(sk);
 }
 
index 7298137..7787537 100644 (file)
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 6243258..4fac4f2 100644 (file)
@@ -322,7 +322,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 1b4fd68..81cf5a4 100644 (file)
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if
index 752b723..c275bad 100644 (file)
@@ -150,6 +150,20 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
        rfkill_led_trigger_event(rfkill);
 }
 
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+       return rfkill->led_trigger.name;
+}
+EXPORT_SYMBOL(rfkill_get_led_trigger_name);
+
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+       BUG_ON(!rfkill);
+
+       rfkill->ledtrigname = name;
+}
+EXPORT_SYMBOL(rfkill_set_led_trigger_name);
+
 static int rfkill_led_trigger_register(struct rfkill *rfkill)
 {
        rfkill->led_trigger.name = rfkill->ledtrigname
index 6dd1131..dc3ef5a 100644 (file)
@@ -319,7 +319,7 @@ replay:
                }
        }
 
-       err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
+       err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh);
        if (err == 0) {
                if (tp_created) {
                        spin_lock_bh(root_lock);
index 590960a..344a11b 100644 (file)
@@ -162,7 +162,8 @@ errout:
        return err;
 }
 
-static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+static int basic_change(struct sk_buff *in_skb,
+                       struct tcf_proto *tp, unsigned long base, u32 handle,
                        struct nlattr **tca, unsigned long *arg)
 {
        int err;
index 7743ea8..91de666 100644 (file)
@@ -151,7 +151,8 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
 };
 
-static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
+static int cls_cgroup_change(struct sk_buff *in_skb,
+                            struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
 {
index ccd08c8..ce82d0c 100644 (file)
@@ -193,15 +193,19 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb)
 
 static u32 flow_get_skuid(const struct sk_buff *skb)
 {
-       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
-               return skb->sk->sk_socket->file->f_cred->fsuid;
+       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
+               kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
+               return from_kuid(&init_user_ns, skuid);
+       }
        return 0;
 }
 
 static u32 flow_get_skgid(const struct sk_buff *skb)
 {
-       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
-               return skb->sk->sk_socket->file->f_cred->fsgid;
+       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
+               kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
+               return from_kgid(&init_user_ns, skgid);
+       }
        return 0;
 }
 
@@ -347,7 +351,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
        [TCA_FLOW_PERTURB]      = { .type = NLA_U32 },
 };
 
-static int flow_change(struct tcf_proto *tp, unsigned long base,
+static int flow_change(struct sk_buff *in_skb,
+                      struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
                       unsigned long *arg)
 {
@@ -386,6 +391,10 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 
                if (fls(keymask) - 1 > FLOW_KEY_MAX)
                        return -EOPNOTSUPP;
+
+               if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
+                   sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns)
+                       return -EOPNOTSUPP;
        }
 
        err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
index 8384a47..4075a0a 100644 (file)
@@ -233,7 +233,8 @@ errout:
        return err;
 }
 
-static int fw_change(struct tcf_proto *tp, unsigned long base,
+static int fw_change(struct sk_buff *in_skb,
+                    struct tcf_proto *tp, unsigned long base,
                     u32 handle,
                     struct nlattr **tca,
                     unsigned long *arg)
index 44f405c..c10d57b 100644 (file)
@@ -427,7 +427,8 @@ errout:
        return err;
 }
 
-static int route4_change(struct tcf_proto *tp, unsigned long base,
+static int route4_change(struct sk_buff *in_skb,
+                      struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
                       unsigned long *arg)
index 18ab93e..494bbb9 100644 (file)
@@ -416,7 +416,8 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
        [TCA_RSVP_PINFO]        = { .len = sizeof(struct tc_rsvp_pinfo) },
 };
 
-static int rsvp_change(struct tcf_proto *tp, unsigned long base,
+static int rsvp_change(struct sk_buff *in_skb,
+                      struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
                       unsigned long *arg)
index fe29420..a1293b4 100644 (file)
@@ -332,7 +332,8 @@ errout:
 }
 
 static int
-tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+tcindex_change(struct sk_buff *in_skb,
+              struct tcf_proto *tp, unsigned long base, u32 handle,
               struct nlattr **tca, unsigned long *arg)
 {
        struct nlattr *opt = tca[TCA_OPTIONS];
index d45373f..c7c27bc 100644 (file)
@@ -544,7 +544,8 @@ errout:
        return err;
 }
 
-static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+static int u32_change(struct sk_buff *in_skb,
+                     struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca,
                      unsigned long *arg)
 {
index 511323e..6c4d5fe 100644 (file)
@@ -324,24 +324,6 @@ void netif_carrier_off(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
-/**
- *     netif_notify_peers - notify network peers about existence of @dev
- *     @dev: network device
- *
- * Generate traffic such that interested network peers are aware of
- * @dev, such as by generating a gratuitous ARP. This may be used when
- * a device wants to inform the rest of the network about some sort of
- * reconfiguration such as a failover event or virtual machine
- * migration.
- */
-void netif_notify_peers(struct net_device *dev)
-{
-       rtnl_lock();
-       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
-       rtnl_unlock();
-}
-EXPORT_SYMBOL(netif_notify_peers);
-
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
index ebaef3e..b1ef3bc 100644 (file)
@@ -82,6 +82,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
                                          sctp_scope_t scope,
                                          gfp_t gfp)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        int i;
        sctp_paramhdr_t *p;
@@ -124,7 +125,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * socket values.
         */
        asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
-       asoc->pf_retrans  = sctp_pf_retrans;
+       asoc->pf_retrans  = net->sctp.pf_retrans;
 
        asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
        asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -175,7 +176,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
+               min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -281,7 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * and will revert old behavior.
         */
        asoc->peer.asconf_capable = 0;
-       if (sctp_addip_noauth)
+       if (net->sctp.addip_noauth)
                asoc->peer.asconf_capable = 1;
        asoc->asconf_addr_del_pending = NULL;
        asoc->src_out_of_asoc_ok = 0;
@@ -641,6 +642,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                                           const gfp_t gfp,
                                           const int peer_state)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_transport *peer;
        struct sctp_sock *sp;
        unsigned short port;
@@ -674,7 +676,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                return peer;
        }
 
-       peer = sctp_transport_new(addr, gfp);
+       peer = sctp_transport_new(net, addr, gfp);
        if (!peer)
                return NULL;
 
@@ -1089,13 +1091,15 @@ out:
 
 /* Is this the association we are looking for? */
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
+                                          struct net *net,
                                           const union sctp_addr *laddr,
                                           const union sctp_addr *paddr)
 {
        struct sctp_transport *transport;
 
        if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
-           (htons(asoc->peer.port) == paddr->v4.sin_port)) {
+           (htons(asoc->peer.port) == paddr->v4.sin_port) &&
+           net_eq(sock_net(asoc->base.sk), net)) {
                transport = sctp_assoc_lookup_paddr(asoc, paddr);
                if (!transport)
                        goto out;
@@ -1116,6 +1120,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
        struct sctp_association *asoc =
                container_of(work, struct sctp_association,
                             base.inqueue.immediate);
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
@@ -1148,13 +1153,13 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                if (sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else
-                       SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+                       SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
 
                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
 
                /* Run through the state machine. */
-               error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
+               error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
                                   state, ep, asoc, chunk, GFP_ATOMIC);
 
                /* Check to see if the association is freed in response to
@@ -1414,6 +1419,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
 /* Should we send a SACK to update our peer? */
 static inline int sctp_peer_needs_update(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        switch (asoc->state) {
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1421,7 +1427,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
        case SCTP_STATE_SHUTDOWN_SENT:
                if ((asoc->rwnd > asoc->a_rwnd) &&
                    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
-                          (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
+                          (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
                           asoc->pathmtu)))
                        return 1;
                break;
@@ -1542,7 +1548,8 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
        if (asoc->peer.ipv6_address)
                flags |= SCTP_ADDR6_PEERSUPP;
 
-       return sctp_bind_addr_copy(&asoc->base.bind_addr,
+       return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+                                  &asoc->base.bind_addr,
                                   &asoc->ep->base.bind_addr,
                                   scope, gfp, flags);
 }
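
The pattern in these SCTP hunks is uniform: tunables that used to be globals (sctp_auth_enable, sctp_max_autoclose, sctp_rwnd_upd_shift, ...) are now fields of net->sctp, reached through sock_net() on the owning socket. A small illustrative sketch of reading one such tunable from an association (the helper is hypothetical and simply mirrors the autoclose computation above):

static unsigned long sctp_asoc_autoclose_jiffies(const struct sctp_association *asoc,
						 const struct sctp_sock *sp)
{
	/* per-namespace cap instead of the old global sctp_max_autoclose */
	struct net *net = sock_net(asoc->base.sk);

	return min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
}
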
index bf81204..159b9bc 100644 (file)
@@ -392,13 +392,14 @@ nomem:
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_auth_bytes  *secret;
        struct sctp_shared_key *ep_key;
 
        /* If we don't support AUTH, or peer is not capable
         * we don't need to do anything.
         */
-       if (!sctp_auth_enable || !asoc->peer.auth_capable)
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
                return 0;
 
        /* If the key_id is non-zero and we couldn't find an
@@ -445,11 +446,12 @@ struct sctp_shared_key *sctp_auth_get_shkey(
  */
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct crypto_hash *tfm = NULL;
        __u16   id;
 
        /* if the transforms are already allocted, we are done */
-       if (!sctp_auth_enable) {
+       if (!net->sctp.auth_enable) {
                ep->auth_hmacs = NULL;
                return 0;
        }
@@ -674,7 +676,12 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
 /* Check if peer requested that this chunk is authenticated */
 int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-       if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable)
+       struct net  *net;
+       if (!asoc)
+               return 0;
+
+       net = sock_net(asoc->base.sk);
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
                return 0;
 
        return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
@@ -683,7 +690,12 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 /* Check if we requested that peer authenticate this chunk. */
 int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-       if (!sctp_auth_enable || !asoc)
+       struct net *net;
+       if (!asoc)
+               return 0;
+
+       net = sock_net(asoc->base.sk);
+       if (!net->sctp.auth_enable)
                return 0;
 
        return __sctp_auth_cid(chunk,
index 4ece451..d886b3b 100644 (file)
@@ -52,8 +52,8 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *,
-                             sctp_scope_t scope, gfp_t gfp,
+static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *,
+                             union sctp_addr *, sctp_scope_t scope, gfp_t gfp,
                              int flags);
 static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 
@@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 /* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
  * in 'src' which have a broader scope than 'scope'.
  */
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                        const struct sctp_bind_addr *src,
                        sctp_scope_t scope, gfp_t gfp,
                        int flags)
@@ -75,7 +75,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
 
        /* Extract the addresses which are relevant for this scope.  */
        list_for_each_entry(addr, &src->address_list, list) {
-               error = sctp_copy_one_addr(dest, &addr->a, scope,
+               error = sctp_copy_one_addr(net, dest, &addr->a, scope,
                                           gfp, flags);
                if (error < 0)
                        goto out;
@@ -87,7 +87,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
         */
        if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
                list_for_each_entry(addr, &src->address_list, list) {
-                       error = sctp_copy_one_addr(dest, &addr->a,
+                       error = sctp_copy_one_addr(net, dest, &addr->a,
                                                   SCTP_SCOPE_LINK, gfp,
                                                   flags);
                        if (error < 0)
@@ -448,7 +448,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr       *bp,
 }
 
 /* Copy out addresses from the global local address list. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
+static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
                              union sctp_addr *addr,
                              sctp_scope_t scope, gfp_t gfp,
                              int flags)
@@ -456,8 +456,8 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
        int error = 0;
 
        if (sctp_is_any(NULL, addr)) {
-               error = sctp_copy_local_addr_list(dest, scope, gfp, flags);
-       } else if (sctp_in_scope(addr, scope)) {
+               error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
+       } else if (sctp_in_scope(net, addr, scope)) {
                /* Now that the address is in scope, check to see if
                 * the address type is supported by local sock as
                 * well as the remote peer.
@@ -494,7 +494,7 @@ int sctp_is_any(struct sock *sk, const union sctp_addr *addr)
 }
 
 /* Is 'addr' valid for 'scope'?  */
-int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope)
 {
        sctp_scope_t addr_scope = sctp_scope(addr);
 
@@ -512,7 +512,7 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
         * Address scoping can be selectively controlled via sysctl
         * option
         */
-       switch (sctp_scope_policy) {
+       switch (net->sctp.scope_policy) {
        case SCTP_SCOPE_POLICY_DISABLE:
                return 1;
        case SCTP_SCOPE_POLICY_ENABLE:
index 6c85564..7c2df9c 100644 (file)
@@ -257,7 +257,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        offset = 0;
 
        if ((whole > 1) || (whole && over))
-               SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
+               SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
        /* Create chunks for all the full sized DATA chunks. */
        for (i=0, len=first_len; i < whole; i++) {
index 68a385d..1859e2b 100644 (file)
@@ -65,6 +65,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                                                struct sock *sk,
                                                gfp_t gfp)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmac_algo_param *auth_hmacs = NULL;
        struct sctp_chunks_param *auth_chunks = NULL;
        struct sctp_shared_key *null_key;
@@ -74,7 +75,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        if (!ep->digest)
                return NULL;
 
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                /* Allocate space for HMACS and CHUNKS authentication
                 * variables.  There are arrays that we encode directly
                 * into parameters to make the rest of the operations easier.
@@ -106,7 +107,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                /* If the Add-IP functionality is enabled, we must
                 * authenticate, ASCONF and ASCONF-ACK chunks
                 */
-               if (sctp_addip_enable) {
+               if (net->sctp.addip_enable) {
                        auth_chunks->chunks[0] = SCTP_CID_ASCONF;
                        auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
                        auth_chunks->param_hdr.length =
@@ -140,14 +141,14 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        INIT_LIST_HEAD(&ep->asocs);
 
        /* Use SCTP specific send buffer space queues.  */
-       ep->sndbuf_policy = sctp_sndbuf_policy;
+       ep->sndbuf_policy = net->sctp.sndbuf_policy;
 
        sk->sk_data_ready = sctp_data_ready;
        sk->sk_write_space = sctp_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
        /* Get the receive buffer policy for this endpoint */
-       ep->rcvbuf_policy = sctp_rcvbuf_policy;
+       ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
 
        /* Initialize the secret key used with cookie. */
        get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -302,11 +303,13 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 
 /* Is this the endpoint we are looking for?  */
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+                                              struct net *net,
                                               const union sctp_addr *laddr)
 {
        struct sctp_endpoint *retval = NULL;
 
-       if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
+       if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
+           net_eq(sock_net(ep->base.sk), net)) {
                if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
                                         sctp_sk(ep->base.sk)))
                        retval = ep;
@@ -343,7 +346,8 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 
        rport = ntohs(paddr->v4.sin_port);
 
-       hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+       hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
+                                rport);
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
@@ -386,13 +390,14 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 {
        struct sctp_sockaddr_entry *addr;
        struct sctp_bind_addr *bp;
+       struct net *net = sock_net(ep->base.sk);
 
        bp = &ep->base.bind_addr;
        /* This function is called with the socket lock held,
         * so the address_list can not change.
         */
        list_for_each_entry(addr, &bp->address_list, list) {
-               if (sctp_has_association(&addr->a, paddr))
+               if (sctp_has_association(net, &addr->a, paddr))
                        return 1;
        }
 
@@ -409,6 +414,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
                             base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
+       struct net *net;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
@@ -423,6 +429,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
        asoc = NULL;
        inqueue = &ep->base.inqueue;
        sk = ep->base.sk;
+       net = sock_net(sk);
 
        while (NULL != (chunk = sctp_inq_pop(inqueue))) {
                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
@@ -474,12 +481,12 @@ normal:
                if (asoc && sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else
-                       SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+                       SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
 
                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
 
-               error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
+               error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
                                   ep, asoc, chunk, GFP_ATOMIC);
 
                if (error && chunk)
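
sctp_endpoint_is_match() now takes the namespace and only matches an endpoint whose socket lives in it (the added net_eq() check), since the endpoint hash table itself remains shared. Below is a compilable userspace sketch of that two-key match; the toy_* types are hypothetical simplifications of the kernel ones.

#include <stdio.h>

struct toy_net { int id; };                 /* stands in for struct net */

struct toy_endpoint {
        const struct toy_net *net;          /* sock_net(ep->base.sk) analogue */
        unsigned short port;
};

/* Mirrors sctp_endpoint_is_match(): the port alone is no longer enough,
 * the endpoint must also belong to the namespace doing the lookup. */
static int toy_endpoint_is_match(const struct toy_endpoint *ep,
                                 const struct toy_net *net,
                                 unsigned short port)
{
        return ep->port == port && ep->net == net;      /* net_eq() analogue */
}

int main(void)
{
        struct toy_net net1 = { 1 }, net2 = { 2 };
        struct toy_endpoint ep = { &net1, 7 };

        printf("%d %d\n",
               toy_endpoint_is_match(&ep, &net1, 7),    /* 1: same namespace */
               toy_endpoint_is_match(&ep, &net2, 7));   /* 0: other namespace */
        return 0;
}
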
index e64d521..25dfe73 100644
 
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      const union sctp_addr *paddr,
                                      struct sctp_transport **transportp);
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+                                               const union sctp_addr *laddr);
 static struct sctp_association *__sctp_lookup_association(
+                                       struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt);
@@ -80,7 +83,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
-static inline int sctp_rcv_checksum(struct sk_buff *skb)
+static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 {
        struct sctphdr *sh = sctp_hdr(skb);
        __le32 cmp = sh->checksum;
@@ -96,7 +99,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 
        if (val != cmp) {
                /* CRC failure, dump it. */
-               SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
                return -1;
        }
        return 0;
@@ -129,11 +132,12 @@ int sctp_rcv(struct sk_buff *skb)
        union sctp_addr dest;
        int family;
        struct sctp_af *af;
+       struct net *net = dev_net(skb->dev);
 
        if (skb->pkt_type!=PACKET_HOST)
                goto discard_it;
 
-       SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
+       SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
 
        if (skb_linearize(skb))
                goto discard_it;
@@ -145,7 +149,7 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb->len < sizeof(struct sctphdr))
                goto discard_it;
        if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
-                 sctp_rcv_checksum(skb) < 0)
+                 sctp_rcv_checksum(net, skb) < 0)
                goto discard_it;
 
        skb_pull(skb, sizeof(struct sctphdr));
@@ -178,10 +182,10 @@ int sctp_rcv(struct sk_buff *skb)
            !af->addr_valid(&dest, NULL, skb))
                goto discard_it;
 
-       asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
+       asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 
        if (!asoc)
-               ep = __sctp_rcv_lookup_endpoint(&dest);
+               ep = __sctp_rcv_lookup_endpoint(net, &dest);
 
        /* Retrieve the common input handling substructure. */
        rcvr = asoc ? &asoc->base : &ep->base;
@@ -200,7 +204,7 @@ int sctp_rcv(struct sk_buff *skb)
                        sctp_endpoint_put(ep);
                        ep = NULL;
                }
-               sk = sctp_get_ctl_sock();
+               sk = net->sctp.ctl_sock;
                ep = sctp_sk(sk)->ep;
                sctp_endpoint_hold(ep);
                rcvr = &ep->base;
@@ -216,7 +220,7 @@ int sctp_rcv(struct sk_buff *skb)
         */
        if (!asoc) {
                if (sctp_rcv_ootb(skb)) {
-                       SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
+                       SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
                        goto discard_release;
                }
        }
@@ -272,9 +276,9 @@ int sctp_rcv(struct sk_buff *skb)
                        skb = NULL; /* sctp_chunk_free already freed the skb */
                        goto discard_release;
                }
-               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
        } else {
-               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
        }
 
@@ -289,7 +293,7 @@ int sctp_rcv(struct sk_buff *skb)
        return 0;
 
 discard_it:
-       SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
+       SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
        kfree_skb(skb);
        return 0;
 
@@ -462,11 +466,13 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
                }
                        
        } else {
+               struct net *net = sock_net(sk);
+
                if (timer_pending(&t->proto_unreach_timer) &&
                    del_timer(&t->proto_unreach_timer))
                        sctp_association_put(asoc);
 
-               sctp_do_sm(SCTP_EVENT_T_OTHER,
+               sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                           asoc->state, asoc->ep, asoc, t,
                           GFP_ATOMIC);
@@ -474,7 +480,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
-struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
                             struct sctphdr *sctphdr,
                             struct sctp_association **app,
                             struct sctp_transport **tpp)
@@ -503,7 +509,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
        /* Look for an association that matches the incoming ICMP error
         * packet.
         */
-       asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
+       asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
        if (!asoc)
                return NULL;
 
@@ -539,7 +545,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        *app = asoc;
        *tpp = transport;
@@ -586,9 +592,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        struct inet_sock *inet;
        sk_buff_data_t saveip, savesctp;
        int err;
+       struct net *net = dev_net(skb->dev);
 
        if (skb->len < ihlen + 8) {
-               ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
 
@@ -597,12 +604,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, ihlen);
-       sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+       sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original values. */
        skb->network_header = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        /* Warning:  The sock lock is held.  Remember to call
@@ -723,12 +730,13 @@ discard:
 /* Insert endpoint into the hash table.  */
 static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct sctp_ep_common *epb;
        struct sctp_hashbucket *head;
 
        epb = &ep->base;
 
-       epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+       epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
@@ -747,12 +755,13 @@ void sctp_hash_endpoint(struct sctp_endpoint *ep)
 /* Remove endpoint from the hash table.  */
 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
 
        epb = &ep->base;
 
-       epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+       epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
@@ -770,7 +779,8 @@ void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 }
 
 /* Look up an endpoint. */
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+                                               const union sctp_addr *laddr)
 {
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
@@ -778,16 +788,16 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
        struct hlist_node *node;
        int hash;
 
-       hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
+       hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
        head = &sctp_ep_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
                ep = sctp_ep(epb);
-               if (sctp_endpoint_is_match(ep, laddr))
+               if (sctp_endpoint_is_match(ep, net, laddr))
                        goto hit;
        }
 
-       ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+       ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
 hit:
        sctp_endpoint_hold(ep);
@@ -798,13 +808,15 @@ hit:
 /* Insert association into the hash table.  */
 static void __sctp_hash_established(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_ep_common *epb;
        struct sctp_hashbucket *head;
 
        epb = &asoc->base;
 
        /* Calculate which chain this entry will belong to. */
-       epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);
+       epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
+                                        asoc->peer.port);
 
        head = &sctp_assoc_hashtable[epb->hashent];
 
@@ -827,12 +839,13 @@ void sctp_hash_established(struct sctp_association *asoc)
 /* Remove association from the hash table.  */
 static void __sctp_unhash_established(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
 
        epb = &asoc->base;
 
-       epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
+       epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
                                         asoc->peer.port);
 
        head = &sctp_assoc_hashtable[epb->hashent];
@@ -855,6 +868,7 @@ void sctp_unhash_established(struct sctp_association *asoc)
 
 /* Look up an association. */
 static struct sctp_association *__sctp_lookup_association(
+                                       struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt)
@@ -869,12 +883,13 @@ static struct sctp_association *__sctp_lookup_association(
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
-       hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
+       hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
+                                ntohs(peer->v4.sin_port));
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
                asoc = sctp_assoc(epb);
-               transport = sctp_assoc_is_match(asoc, local, peer);
+               transport = sctp_assoc_is_match(asoc, net, local, peer);
                if (transport)
                        goto hit;
        }
@@ -892,27 +907,29 @@ hit:
 
 /* Look up an association. BH-safe. */
 SCTP_STATIC
-struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
+struct sctp_association *sctp_lookup_association(struct net *net,
+                                                const union sctp_addr *laddr,
                                                 const union sctp_addr *paddr,
                                            struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
        sctp_local_bh_disable();
-       asoc = __sctp_lookup_association(laddr, paddr, transportp);
+       asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
        sctp_local_bh_enable();
 
        return asoc;
 }
 
 /* Is there an association matching the given local and peer addresses? */
-int sctp_has_association(const union sctp_addr *laddr,
+int sctp_has_association(struct net *net,
+                        const union sctp_addr *laddr,
                         const union sctp_addr *paddr)
 {
        struct sctp_association *asoc;
        struct sctp_transport *transport;
 
-       if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
+       if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
                sctp_association_put(asoc);
                return 1;
        }
@@ -938,7 +955,8 @@ int sctp_has_association(const union sctp_addr *laddr,
  * in certain circumstances.
  *
  */
-static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
+       struct sk_buff *skb,
        const union sctp_addr *laddr, struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
@@ -978,7 +996,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
 
                af->from_addr_param(paddr, params.addr, sh->source, 0);
 
-               asoc = __sctp_lookup_association(laddr, paddr, &transport);
+               asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
                if (asoc)
                        return asoc;
        }
@@ -1001,6 +1019,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
  * subsequent ASCONF Chunks. If found, proceed to rule D4.
  */
 static struct sctp_association *__sctp_rcv_asconf_lookup(
+                                       struct net *net,
                                        sctp_chunkhdr_t *ch,
                                        const union sctp_addr *laddr,
                                        __be16 peer_port,
@@ -1020,7 +1039,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 
        af->from_addr_param(&paddr, param, peer_port, 0);
 
-       return __sctp_lookup_association(laddr, &paddr, transportp);
+       return __sctp_lookup_association(net, laddr, &paddr, transportp);
 }
 
 
@@ -1033,7 +1052,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
-static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
@@ -1074,8 +1094,9 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
                            break;
 
                    case SCTP_CID_ASCONF:
-                           if (have_auth || sctp_addip_noauth)
-                                   asoc = __sctp_rcv_asconf_lookup(ch, laddr,
+                           if (have_auth || net->sctp.addip_noauth)
+                                   asoc = __sctp_rcv_asconf_lookup(
+                                                       net, ch, laddr,
                                                        sctp_hdr(skb)->source,
                                                        transportp);
                    default:
@@ -1098,7 +1119,8 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
  * include looking inside of INIT/INIT-ACK chunks or after the AUTH
  * chunks.
  */
-static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
@@ -1118,11 +1140,11 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
        switch (ch->type) {
        case SCTP_CID_INIT:
        case SCTP_CID_INIT_ACK:
-               return __sctp_rcv_init_lookup(skb, laddr, transportp);
+               return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
                break;
 
        default:
-               return __sctp_rcv_walk_lookup(skb, laddr, transportp);
+               return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
                break;
        }
 
@@ -1131,21 +1153,22 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
 }
 
 /* Lookup an association for an inbound skb. */
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *paddr,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
-       asoc = __sctp_lookup_association(laddr, paddr, transportp);
+       asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 
        /* Further lookup for INIT/INIT-ACK packets.
         * SCTP Implementors Guide, 2.18 Handling of address
         * parameters within the INIT or INIT-ACK.
         */
        if (!asoc)
-               asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);
+               asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
 
        return asoc;
 }
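
Throughout the receive path above, the namespace is derived once from the ingress device (dev_net(skb->dev)) and then threaded into every statistics macro and association/endpoint lookup. The following small userspace model shows that flow; the toy_* structures are hypothetical stand-ins for skb, net_device and net.

#include <stdio.h>

struct toy_net { unsigned long in_pkts; };  /* per-namespace MIB slot */
struct toy_dev { struct toy_net *net; };    /* dev_net() analogue */
struct toy_skb { struct toy_dev *dev; };

#define TOY_INC_STATS(net, field) ((net)->field++)  /* SCTP_INC_STATS_BH analogue */

static void toy_rcv(struct toy_skb *skb)
{
        struct toy_net *net = skb->dev->net;        /* dev_net(skb->dev) */

        /* Counters are charged to the namespace that received the packet,
         * not to a single global statistics block. */
        TOY_INC_STATS(net, in_pkts);
}

int main(void)
{
        struct toy_net n1 = { 0 }, n2 = { 0 };
        struct toy_dev d1 = { &n1 }, d2 = { &n2 };
        struct toy_skb s1 = { &d1 }, s2 = { &d2 }, s3 = { &d2 };

        toy_rcv(&s1);
        toy_rcv(&s2);
        toy_rcv(&s3);
        printf("net1=%lu net2=%lu\n", n1.in_pkts, n2.in_pkts);  /* net1=1 net2=2 */
        return 0;
}
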
index ed7139e..ea14cb4 100644
@@ -99,6 +99,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct sctp_sockaddr_entry *addr = NULL;
        struct sctp_sockaddr_entry *temp;
+       struct net *net = dev_net(ifa->idev->dev);
        int found = 0;
 
        switch (ev) {
@@ -110,27 +111,27 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                        addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
-                       spin_lock_bh(&sctp_local_addr_lock);
-                       list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-                       sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-                       spin_unlock_bh(&sctp_local_addr_lock);
+                       spin_lock_bh(&net->sctp.local_addr_lock);
+                       list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+                       sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+                       spin_unlock_bh(&net->sctp.local_addr_lock);
                }
                break;
        case NETDEV_DOWN:
-               spin_lock_bh(&sctp_local_addr_lock);
+               spin_lock_bh(&net->sctp.local_addr_lock);
                list_for_each_entry_safe(addr, temp,
-                                       &sctp_local_addr_list, list) {
+                                       &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET6 &&
                                        ipv6_addr_equal(&addr->a.v6.sin6_addr,
                                                &ifa->addr)) {
-                               sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
                                list_del_rcu(&addr->list);
                                break;
                        }
                }
-               spin_unlock_bh(&sctp_local_addr_lock);
+               spin_unlock_bh(&net->sctp.local_addr_lock);
                if (found)
                        kfree_rcu(addr, rcu);
                break;
@@ -154,6 +155,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ipv6_pinfo *np;
        sk_buff_data_t saveip, savesctp;
        int err;
+       struct net *net = dev_net(skb->dev);
 
        idev = in6_dev_get(skb->dev);
 
@@ -162,12 +164,12 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, offset);
-       sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+       sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original pointers. */
        skb->network_header   = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP6_INC_STATS_BH(dev_net(skb->dev), idev, ICMP6_MIB_INERRORS);
+               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
                goto out;
        }
 
@@ -241,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
                          __func__, skb, skb->len,
                          &fl6.saddr, &fl6.daddr);
 
-       SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+       SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
                skb->local_df = 1;
@@ -580,7 +582,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (!(type & IPV6_ADDR_UNICAST))
                return 0;
 
-       return ipv6_chk_addr(&init_net, in6, NULL, 0);
+       return ipv6_chk_addr(sock_net(&sp->inet.sk), in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -857,14 +859,14 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                struct net_device *dev;
 
                if (type & IPV6_ADDR_LINKLOCAL) {
+                       struct net *net;
                        if (!addr->v6.sin6_scope_id)
                                return 0;
+                       net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
-                       dev = dev_get_by_index_rcu(&init_net,
-                                                  addr->v6.sin6_scope_id);
+                       dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
                        if (!dev ||
-                           !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
-                                          dev, 0)) {
+                           !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
                                rcu_read_unlock();
                                return 0;
                        }
@@ -897,7 +899,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        if (!addr->v6.sin6_scope_id)
                                return 0;
                        rcu_read_lock();
-                       dev = dev_get_by_index_rcu(&init_net,
+                       dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
                                                   addr->v6.sin6_scope_id);
                        rcu_read_unlock();
                        if (!dev)
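
The IPv6 notifier above follows the same scheme as the IPv4 notifier later in this patch: the namespace comes from the interface that generated the event, and the address is added to that namespace's local_addr_list rather than a global list. A toy userspace illustration, using hypothetical toy_* names and a simplified singly linked list:

#include <stdio.h>

struct toy_addr {
        const char *addr;
        struct toy_addr *next;
};

struct toy_net {                            /* each namespace owns its list */
        struct toy_addr *local_addr_list;
};

struct toy_dev { struct toy_net *net; };    /* dev_net(ifa->idev->dev) analogue */

/* Mirrors the NETDEV_UP branch: the new address goes on the list of the
 * namespace that owns the interface, not on one global list. */
static void toy_addr_up(struct toy_dev *dev, struct toy_addr *a)
{
        struct toy_net *net = dev->net;

        a->next = net->local_addr_list;
        net->local_addr_list = a;
}

int main(void)
{
        struct toy_net n1 = { NULL }, n2 = { NULL };
        struct toy_dev d1 = { &n1 }, d2 = { &n2 };
        struct toy_addr a1 = { "2001:db8::1", NULL };
        struct toy_addr a2 = { "2001:db8::2", NULL };

        toy_addr_up(&d1, &a1);
        toy_addr_up(&d2, &a2);
        printf("net1 head: %s, net2 head: %s\n",
               n1.local_addr_list->addr, n2.local_addr_list->addr);
        return 0;
}
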
index 8ef8e7d..fe012c4 100644
@@ -129,20 +129,20 @@ static const struct file_operations sctp_objcnt_ops = {
 };
 
 /* Initialize the objcount in the proc filesystem.  */
-void sctp_dbg_objcnt_init(void)
+void sctp_dbg_objcnt_init(struct net *net)
 {
        struct proc_dir_entry *ent;
 
        ent = proc_create("sctp_dbg_objcnt", 0,
-                         proc_net_sctp, &sctp_objcnt_ops);
+                         net->sctp.proc_net_sctp, &sctp_objcnt_ops);
        if (!ent)
                pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
 }
 
 /* Cleanup the objcount entry in the proc filesystem.  */
-void sctp_dbg_objcnt_exit(void)
+void sctp_dbg_objcnt_exit(struct net *net)
 {
-       remove_proc_entry("sctp_dbg_objcnt", proc_net_sctp);
+       remove_proc_entry("sctp_dbg_objcnt", net->sctp.proc_net_sctp);
 }
 
 
index 838e18b..0c6359b 100644
@@ -597,7 +597,7 @@ out:
        return err;
 no_route:
        kfree_skb(nskb);
-       IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+       IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
        /* FIXME: Returning the 'err' will effect all the associations
         * associated with a socket, although only one of the paths of the
index e7aa177..072bf6a 100644
@@ -299,6 +299,7 @@ void sctp_outq_free(struct sctp_outq *q)
 /* Put a new chunk in an sctp_outq.  */
 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 {
+       struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
        SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
@@ -337,15 +338,15 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 
                        sctp_outq_tail_data(q, chunk);
                        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-                               SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
                        else
-                               SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
                        q->empty = 0;
                        break;
                }
        } else {
                list_add_tail(&chunk->list, &q->control_chunk_list);
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
        if (error < 0)
@@ -478,11 +479,12 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                     sctp_retransmit_reason_t reason)
 {
+       struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
        switch(reason) {
        case SCTP_RTXR_T3_RTX:
-               SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                /* Update the retran path if the T3-rtx timer has expired for
                 * the current retran path.
@@ -493,15 +495,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                        transport->asoc->unack_data;
                break;
        case SCTP_RTXR_FAST_RTX:
-               SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                q->fast_rtx = 1;
                break;
        case SCTP_RTXR_PMTUD:
-               SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
                break;
        case SCTP_RTXR_T1_RTX:
-               SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
                transport->asoc->init_retries++;
                break;
        default:
@@ -1914,6 +1916,6 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 
        if (ftsn_chunk) {
                list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
        }
 }
index 534c7ea..794bb14 100644
@@ -57,7 +57,7 @@
 
 #define DECLARE_PRIMITIVE(name) \
 /* This is called in the code as sctp_primitive_ ## name.  */ \
-int sctp_primitive_ ## name(struct sctp_association *asoc, \
+int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
                            void *arg) { \
        int error = 0; \
        sctp_event_t event_type; sctp_subtype_t subtype; \
@@ -69,7 +69,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
        state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
        ep = asoc ? asoc->ep : NULL; \
        \
-       error = sctp_do_sm(event_type, subtype, state, ep, asoc, \
+       error = sctp_do_sm(net, event_type, subtype, state, ep, asoc,   \
                           arg, GFP_KERNEL); \
        return error; \
 }
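
With the extra parameter, every DECLARE_PRIMITIVE(name) expansion now produces sctp_primitive_<name>(struct net *net, ...) and forwards net into sctp_do_sm(). A stand-alone toy version of that macro pattern is shown below; toy_do_sm and toy_net are hypothetical, not the kernel API.

#include <stdio.h>

struct toy_net { int id; };                 /* stands in for struct net */

static int toy_do_sm(struct toy_net *net, const char *event, void *arg)
{
        (void)arg;
        printf("net %d: event %s\n", net->id, event);
        return 0;
}

/* Toy version of DECLARE_PRIMITIVE(): the generated wrapper now takes the
 * namespace first and forwards it to the state-machine entry point. */
#define DECLARE_TOY_PRIMITIVE(name)                                     \
static int toy_primitive_ ## name(struct toy_net *net, void *arg)       \
{                                                                       \
        return toy_do_sm(net, #name, arg);                              \
}

DECLARE_TOY_PRIMITIVE(SEND)
DECLARE_TOY_PRIMITIVE(SHUTDOWN)

int main(void)
{
        struct toy_net net = { 1 };

        toy_primitive_SEND(&net, NULL);
        toy_primitive_SHUTDOWN(&net, NULL);
        return 0;
}
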
index 1e2eee8..c3bea26 100644
@@ -80,11 +80,12 @@ static const struct snmp_mib sctp_snmp_list[] = {
 /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
 static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 {
+       struct net *net = seq->private;
        int i;
 
        for (i = 0; sctp_snmp_list[i].name != NULL; i++)
                seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                          snmp_fold_field((void __percpu **)sctp_statistics,
+                          snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
                                      sctp_snmp_list[i].entry));
 
        return 0;
@@ -93,7 +94,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 /* Initialize the seq file operations for 'snmp' object. */
 static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, sctp_snmp_seq_show, NULL);
+       return single_open_net(inode, file, sctp_snmp_seq_show);
 }
 
 static const struct file_operations sctp_snmp_seq_fops = {
@@ -105,11 +106,12 @@ static const struct file_operations sctp_snmp_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'snmp' object. */
-int __init sctp_snmp_proc_init(void)
+int __net_init sctp_snmp_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops);
+       p = proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_snmp_seq_fops);
        if (!p)
                return -ENOMEM;
 
@@ -117,9 +119,9 @@ int __init sctp_snmp_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'snmp' object. */
-void sctp_snmp_proc_exit(void)
+void sctp_snmp_proc_exit(struct net *net)
 {
-       remove_proc_entry("snmp", proc_net_sctp);
+       remove_proc_entry("snmp", net->sctp.proc_net_sctp);
 }
 
 /* Dump local addresses of an association/endpoint. */
@@ -213,10 +215,13 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
        sctp_for_each_hentry(epb, node, &head->chain) {
                ep = sctp_ep(epb);
                sk = epb->sk;
+               if (!net_eq(sock_net(sk), seq_file_net(seq)))
+                       continue;
                seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
                           sctp_sk(sk)->type, sk->sk_state, hash,
                           epb->bind_addr.port,
-                          sock_i_uid(sk), sock_i_ino(sk));
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                          sock_i_ino(sk));
 
                sctp_seq_dump_local_addrs(seq, epb);
                seq_printf(seq, "\n");
@@ -238,7 +243,8 @@ static const struct seq_operations sctp_eps_ops = {
 /* Initialize the seq file operations for 'eps' object. */
 static int sctp_eps_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_eps_ops);
+       return seq_open_net(inode, file, &sctp_eps_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_eps_seq_fops = {
@@ -249,11 +255,12 @@ static const struct file_operations sctp_eps_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'eps' object. */
-int __init sctp_eps_proc_init(void)
+int __net_init sctp_eps_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
+       p = proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_eps_seq_fops);
        if (!p)
                return -ENOMEM;
 
@@ -261,9 +268,9 @@ int __init sctp_eps_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'eps' object. */
-void sctp_eps_proc_exit(void)
+void sctp_eps_proc_exit(struct net *net)
 {
-       remove_proc_entry("eps", proc_net_sctp);
+       remove_proc_entry("eps", net->sctp.proc_net_sctp);
 }
 
 
@@ -316,6 +323,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        sctp_for_each_hentry(epb, node, &head->chain) {
                assoc = sctp_assoc(epb);
                sk = epb->sk;
+               if (!net_eq(sock_net(sk), seq_file_net(seq)))
+                       continue;
                seq_printf(seq,
                           "%8pK %8pK %-3d %-3d %-2d %-4d "
                           "%4d %8d %8d %7d %5lu %-5d %5d ",
@@ -324,7 +333,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                           assoc->assoc_id,
                           assoc->sndbuf_used,
                           atomic_read(&assoc->rmem_alloc),
-                          sock_i_uid(sk), sock_i_ino(sk),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                          sock_i_ino(sk),
                           epb->bind_addr.port,
                           assoc->peer.port);
                seq_printf(seq, " ");
@@ -354,7 +364,8 @@ static const struct seq_operations sctp_assoc_ops = {
 /* Initialize the seq file operations for 'assocs' object. */
 static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_assoc_ops);
+       return seq_open_net(inode, file, &sctp_assoc_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_assocs_seq_fops = {
@@ -365,11 +376,11 @@ static const struct file_operations sctp_assocs_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'assocs' object. */
-int __init sctp_assocs_proc_init(void)
+int __net_init sctp_assocs_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("assocs", S_IRUGO, proc_net_sctp,
+       p = proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp,
                        &sctp_assocs_seq_fops);
        if (!p)
                return -ENOMEM;
@@ -378,9 +389,9 @@ int __init sctp_assocs_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'assocs' object. */
-void sctp_assocs_proc_exit(void)
+void sctp_assocs_proc_exit(struct net *net)
 {
-       remove_proc_entry("assocs", proc_net_sctp);
+       remove_proc_entry("assocs", net->sctp.proc_net_sctp);
 }
 
 static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
@@ -426,6 +437,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        sctp_local_bh_disable();
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
+               if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
+                       continue;
                assoc = sctp_assoc(epb);
                list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
                                        transports) {
@@ -489,14 +502,15 @@ static const struct seq_operations sctp_remaddr_ops = {
 };
 
 /* Cleanup the proc fs entry for 'remaddr' object. */
-void sctp_remaddr_proc_exit(void)
+void sctp_remaddr_proc_exit(struct net *net)
 {
-       remove_proc_entry("remaddr", proc_net_sctp);
+       remove_proc_entry("remaddr", net->sctp.proc_net_sctp);
 }
 
 static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_remaddr_ops);
+       return seq_open_net(inode, file, &sctp_remaddr_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_remaddr_seq_fops = {
@@ -506,11 +520,12 @@ static const struct file_operations sctp_remaddr_seq_fops = {
        .release = seq_release,
 };
 
-int __init sctp_remaddr_proc_init(void)
+int __net_init sctp_remaddr_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
+       p = proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_remaddr_seq_fops);
        if (!p)
                return -ENOMEM;
        return 0;
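
Because the /proc files are now created per namespace while the endpoint and association hash tables stay global, the seq_show helpers above must skip rows whose socket belongs to another namespace (the added net_eq(sock_net(sk), seq_file_net(seq)) checks). A minimal userspace sketch of that filtering, with hypothetical toy_* types:

#include <stdio.h>

struct toy_net { int id; };                 /* stands in for struct net */

struct toy_sock {
        const struct toy_net *net;          /* sock_net(sk) analogue */
        int port;
};

/* The table stays global, so a per-namespace dump skips entries
 * owned by other namespaces, as the added net_eq() checks do. */
static void toy_seq_show(const struct toy_net *seq_net,
                         const struct toy_sock *tbl, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].net != seq_net)  /* !net_eq(): not ours, skip */
                        continue;
                printf("net %d: port %d\n", seq_net->id, tbl[i].port);
        }
}

int main(void)
{
        struct toy_net n1 = { 1 }, n2 = { 2 };
        struct toy_sock tbl[] = { { &n1, 9000 }, { &n2, 9001 }, { &n1, 9002 } };

        toy_seq_show(&n1, tbl, 3);          /* prints ports 9000 and 9002 */
        return 0;
}
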
index 1f89c4e..2d51842 100644
 
 /* Global data structures. */
 struct sctp_globals sctp_globals __read_mostly;
-DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
-
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry  *proc_net_sctp;
-#endif
 
 struct idr sctp_assocs_id;
 DEFINE_SPINLOCK(sctp_assocs_id_lock);
 
-/* This is the global socket data structure used for responding to
- * the Out-of-the-blue (OOTB) packets.  A control sock will be created
- * for this socket at the initialization time.
- */
-static struct sock *sctp_ctl_sock;
-
 static struct sctp_pf *sctp_pf_inet6_specific;
 static struct sctp_pf *sctp_pf_inet_specific;
 static struct sctp_af *sctp_af_v4_specific;
@@ -96,74 +85,54 @@ long sysctl_sctp_mem[3];
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];
 
-/* Return the address of the control sock. */
-struct sock *sctp_get_ctl_sock(void)
-{
-       return sctp_ctl_sock;
-}
-
 /* Set up the proc fs entry for the SCTP protocol. */
-static __init int sctp_proc_init(void)
+static __net_init int sctp_proc_init(struct net *net)
 {
-       if (percpu_counter_init(&sctp_sockets_allocated, 0))
-               goto out_nomem;
 #ifdef CONFIG_PROC_FS
-       if (!proc_net_sctp) {
-               proc_net_sctp = proc_mkdir("sctp", init_net.proc_net);
-               if (!proc_net_sctp)
-                       goto out_free_percpu;
-       }
-
-       if (sctp_snmp_proc_init())
+       net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
+       if (!net->sctp.proc_net_sctp)
+               goto out_proc_net_sctp;
+       if (sctp_snmp_proc_init(net))
                goto out_snmp_proc_init;
-       if (sctp_eps_proc_init())
+       if (sctp_eps_proc_init(net))
                goto out_eps_proc_init;
-       if (sctp_assocs_proc_init())
+       if (sctp_assocs_proc_init(net))
                goto out_assocs_proc_init;
-       if (sctp_remaddr_proc_init())
+       if (sctp_remaddr_proc_init(net))
                goto out_remaddr_proc_init;
 
        return 0;
 
 out_remaddr_proc_init:
-       sctp_assocs_proc_exit();
+       sctp_assocs_proc_exit(net);
 out_assocs_proc_init:
-       sctp_eps_proc_exit();
+       sctp_eps_proc_exit(net);
 out_eps_proc_init:
-       sctp_snmp_proc_exit();
+       sctp_snmp_proc_exit(net);
 out_snmp_proc_init:
-       if (proc_net_sctp) {
-               proc_net_sctp = NULL;
-               remove_proc_entry("sctp", init_net.proc_net);
-       }
-out_free_percpu:
-       percpu_counter_destroy(&sctp_sockets_allocated);
-#else
-       return 0;
-#endif /* CONFIG_PROC_FS */
-
-out_nomem:
+       remove_proc_entry("sctp", net->proc_net);
+       net->sctp.proc_net_sctp = NULL;
+out_proc_net_sctp:
        return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+       return 0;
 }
 
 /* Clean up the proc fs entry for the SCTP protocol.
  * Note: Do not make this __exit as it is used in the init error
  * path.
  */
-static void sctp_proc_exit(void)
+static void sctp_proc_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       sctp_snmp_proc_exit();
-       sctp_eps_proc_exit();
-       sctp_assocs_proc_exit();
-       sctp_remaddr_proc_exit();
-
-       if (proc_net_sctp) {
-               proc_net_sctp = NULL;
-               remove_proc_entry("sctp", init_net.proc_net);
-       }
+       sctp_snmp_proc_exit(net);
+       sctp_eps_proc_exit(net);
+       sctp_assocs_proc_exit(net);
+       sctp_remaddr_proc_exit(net);
+
+       remove_proc_entry("sctp", net->proc_net);
+       net->sctp.proc_net_sctp = NULL;
 #endif
-       percpu_counter_destroy(&sctp_sockets_allocated);
 }
 
 /* Private helper to extract ipv4 address and stash them in
@@ -201,29 +170,29 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 /* Extract our IP addresses from the system and stash them in the
  * protocol structure.
  */
-static void sctp_get_local_addr_list(void)
+static void sctp_get_local_addr_list(struct net *net)
 {
        struct net_device *dev;
        struct list_head *pos;
        struct sctp_af *af;
 
        rcu_read_lock();
-       for_each_netdev_rcu(&init_net, dev) {
+       for_each_netdev_rcu(net, dev) {
                __list_for_each(pos, &sctp_address_families) {
                        af = list_entry(pos, struct sctp_af, list);
-                       af->copy_addrlist(&sctp_local_addr_list, dev);
+                       af->copy_addrlist(&net->sctp.local_addr_list, dev);
                }
        }
        rcu_read_unlock();
 }
 
 /* Free the existing local addresses.  */
-static void sctp_free_local_addr_list(void)
+static void sctp_free_local_addr_list(struct net *net)
 {
        struct sctp_sockaddr_entry *addr;
        struct list_head *pos, *temp;
 
-       list_for_each_safe(pos, temp, &sctp_local_addr_list) {
+       list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
                list_del(pos);
                kfree(addr);
@@ -231,17 +200,17 @@ static void sctp_free_local_addr_list(void)
 }
 
 /* Copy the local addresses which are valid for 'scope' into 'bp'.  */
-int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
-                             gfp_t gfp, int copy_flags)
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
+                             sctp_scope_t scope, gfp_t gfp, int copy_flags)
 {
        struct sctp_sockaddr_entry *addr;
        int error = 0;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+       list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
-               if (sctp_in_scope(&addr->a, scope)) {
+               if (sctp_in_scope(net, &addr->a, scope)) {
                        /* Now that the address is in scope, check to see if
                         * the address type is really supported by the local
                         * sock as well as the remote peer.
@@ -397,7 +366,8 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
 /* Should this be available for binding?   */
 static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
 {
-       int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr);
+       struct net *net = sock_net(&sp->inet.sk);
+       int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
 
 
        if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
@@ -484,7 +454,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
                          __func__, &fl4->daddr, &fl4->saddr);
 
-       rt = ip_route_output_key(&init_net, fl4);
+       rt = ip_route_output_key(sock_net(sk), fl4);
        if (!IS_ERR(rt))
                dst = &rt->dst;
 
@@ -530,7 +500,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                    (AF_INET == laddr->a.sa.sa_family)) {
                        fl4->saddr = laddr->a.v4.sin_addr.s_addr;
                        fl4->fl4_sport = laddr->a.v4.sin_port;
-                       rt = ip_route_output_key(&init_net, fl4);
+                       rt = ip_route_output_key(sock_net(sk), fl4);
                        if (!IS_ERR(rt)) {
                                dst = &rt->dst;
                                goto out_unlock;
@@ -627,14 +597,15 @@ static void sctp_v4_ecn_capable(struct sock *sk)
 
 void sctp_addr_wq_timeout_handler(unsigned long arg)
 {
+       struct net *net = (struct net *)arg;
        struct sctp_sockaddr_entry *addrw, *temp;
        struct sctp_sock *sp;
 
-       spin_lock_bh(&sctp_addr_wq_lock);
+       spin_lock_bh(&net->sctp.addr_wq_lock);
 
-       list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
                SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
-                   " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+                   " for cmd %d at entry %p\n", &net->sctp.addr_waitq, &addrw->a, addrw->state,
                    addrw);
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -648,7 +619,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                goto free_next;
 
                        in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
-                       if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+                       if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
                            addrw->state == SCTP_ADDR_NEW) {
                                unsigned long timeo_val;
 
@@ -656,12 +627,12 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                    SCTP_ADDRESS_TICK_DELAY);
                                timeo_val = jiffies;
                                timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-                               mod_timer(&sctp_addr_wq_timer, timeo_val);
+                               mod_timer(&net->sctp.addr_wq_timer, timeo_val);
                                break;
                        }
                }
 #endif
-               list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+               list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
                        struct sock *sk;
 
                        sk = sctp_opt2sk(sp);
@@ -679,31 +650,32 @@ free_next:
                list_del(&addrw->list);
                kfree(addrw);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
-static void sctp_free_addr_wq(void)
+static void sctp_free_addr_wq(struct net *net)
 {
        struct sctp_sockaddr_entry *addrw;
        struct sctp_sockaddr_entry *temp;
 
-       spin_lock_bh(&sctp_addr_wq_lock);
-       del_timer(&sctp_addr_wq_timer);
-       list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+       spin_lock_bh(&net->sctp.addr_wq_lock);
+       del_timer(&net->sctp.addr_wq_timer);
+       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
                list_del(&addrw->list);
                kfree(addrw);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* lookup the entry for the same address in the addr_waitq
  * sctp_addr_wq MUST be locked
  */
-static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
+                                       struct sctp_sockaddr_entry *addr)
 {
        struct sctp_sockaddr_entry *addrw;
 
-       list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+       list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
                if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
                        continue;
                if (addrw->a.sa.sa_family == AF_INET) {
@@ -719,7 +691,7 @@ static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entr
        return NULL;
 }
 
-void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
 {
        struct sctp_sockaddr_entry *addrw;
        unsigned long timeo_val;
@@ -730,38 +702,38 @@ void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
         * new address after a couple of addition and deletion of that address
         */
 
-       spin_lock_bh(&sctp_addr_wq_lock);
+       spin_lock_bh(&net->sctp.addr_wq_lock);
        /* Offsets existing events in addr_wq */
-       addrw = sctp_addr_wq_lookup(addr);
+       addrw = sctp_addr_wq_lookup(net, addr);
        if (addrw) {
                if (addrw->state != cmd) {
                        SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
                            " in wq %p\n", addrw->state, &addrw->a,
-                           &sctp_addr_waitq);
+                           &net->sctp.addr_waitq);
                        list_del(&addrw->list);
                        kfree(addrw);
                }
-               spin_unlock_bh(&sctp_addr_wq_lock);
+               spin_unlock_bh(&net->sctp.addr_wq_lock);
                return;
        }
 
        /* OK, we have to add the new address to the wait queue */
        addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
        if (addrw == NULL) {
-               spin_unlock_bh(&sctp_addr_wq_lock);
+               spin_unlock_bh(&net->sctp.addr_wq_lock);
                return;
        }
        addrw->state = cmd;
-       list_add_tail(&addrw->list, &sctp_addr_waitq);
+       list_add_tail(&addrw->list, &net->sctp.addr_waitq);
        SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
-           " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+           " in wq %p\n", addrw->state, &addrw->a, &net->sctp.addr_waitq);
 
-       if (!timer_pending(&sctp_addr_wq_timer)) {
+       if (!timer_pending(&net->sctp.addr_wq_timer)) {
                timeo_val = jiffies;
                timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-               mod_timer(&sctp_addr_wq_timer, timeo_val);
+               mod_timer(&net->sctp.addr_wq_timer, timeo_val);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* Event handler for inet address addition/deletion events.
@@ -776,11 +748,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
        struct sctp_sockaddr_entry *addr = NULL;
        struct sctp_sockaddr_entry *temp;
+       struct net *net = dev_net(ifa->ifa_dev->dev);
        int found = 0;
 
-       if (!net_eq(dev_net(ifa->ifa_dev->dev), &init_net))
-               return NOTIFY_DONE;
-
        switch (ev) {
        case NETDEV_UP:
                addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
@@ -789,27 +759,27 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                        addr->a.v4.sin_port = 0;
                        addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
                        addr->valid = 1;
-                       spin_lock_bh(&sctp_local_addr_lock);
-                       list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-                       sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-                       spin_unlock_bh(&sctp_local_addr_lock);
+                       spin_lock_bh(&net->sctp.local_addr_lock);
+                       list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+                       sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+                       spin_unlock_bh(&net->sctp.local_addr_lock);
                }
                break;
        case NETDEV_DOWN:
-               spin_lock_bh(&sctp_local_addr_lock);
+               spin_lock_bh(&net->sctp.local_addr_lock);
                list_for_each_entry_safe(addr, temp,
-                                       &sctp_local_addr_list, list) {
+                                       &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET &&
                                        addr->a.v4.sin_addr.s_addr ==
                                        ifa->ifa_local) {
-                               sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
                                list_del_rcu(&addr->list);
                                break;
                        }
                }
-               spin_unlock_bh(&sctp_local_addr_lock);
+               spin_unlock_bh(&net->sctp.local_addr_lock);
                if (found)
                        kfree_rcu(addr, rcu);
                break;
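
The hunk above drops the old init_net-only bailout and instead derives the namespace from the device that reported the address change. A minimal sketch of that pattern in an inetaddr notifier callback (hypothetical names, not part of this patch):

    static int example_inetaddr_event(struct notifier_block *this,
                                      unsigned long ev, void *ptr)
    {
            struct in_ifaddr *ifa = ptr;
            /* the namespace is owned by the device carrying the address */
            struct net *net = dev_net(ifa->ifa_dev->dev);

            if (ev == NETDEV_UP)
                    pr_debug("address added in netns %p\n", net);
            return NOTIFY_DONE;
    }

Such a callback would be wired up with register_inetaddr_notifier(), just as sctp_inetaddr_notifier is elsewhere in this file.
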
@@ -822,7 +792,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
  * Initialize the control inode/socket with a control endpoint data
  * structure.  This endpoint is reserved exclusively for the OOTB processing.
  */
-static int sctp_ctl_sock_init(void)
+static int sctp_ctl_sock_init(struct net *net)
 {
        int err;
        sa_family_t family = PF_INET;
@@ -830,14 +800,14 @@ static int sctp_ctl_sock_init(void)
        if (sctp_get_pf_specific(PF_INET6))
                family = PF_INET6;
 
-       err = inet_ctl_sock_create(&sctp_ctl_sock, family,
-                                  SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
+       err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
+                                  SOCK_SEQPACKET, IPPROTO_SCTP, net);
 
        /* If IPv6 socket could not be created, try the IPv4 socket */
        if (err < 0 && family == PF_INET6)
-               err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
+               err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
                                           SOCK_SEQPACKET, IPPROTO_SCTP,
-                                          &init_net);
+                                          net);
 
        if (err < 0) {
                pr_err("Failed to create the SCTP control socket\n");
@@ -990,7 +960,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
        inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
                         IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
 
-       SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+       SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
        return ip_queue_xmit(skb, &transport->fl);
 }
 
@@ -1063,6 +1033,7 @@ static const struct net_protocol sctp_protocol = {
        .handler     = sctp_rcv,
        .err_handler = sctp_v4_err,
        .no_policy   = 1,
+       .netns_ok    = 1,
 };
 
 /* IPv4 address related functions.  */
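
Marking the handler netns_ok is what actually lets the IPv4 input path deliver SCTP packets in namespaces other than init_net; protocols that leave the flag clear only see init_net traffic. A hedged, self-contained sketch of such a registration (hypothetical names and protocol slot, not part of this patch):

    static int example_rcv(struct sk_buff *skb)
    {
            struct net *net = dev_net(skb->dev);    /* namespace of arrival */

            pr_debug("packet received in netns %p\n", net);
            consume_skb(skb);
            return 0;
    }

    static const struct net_protocol example_protocol = {
            .handler    = example_rcv,
            .no_policy  = 1,
            .netns_ok   = 1,    /* deliver in every namespace, not just init_net */
    };

    /* inet_add_protocol(&example_protocol, proto) would hook it into the IPv4
     * demux for a hypothetical protocol number, mirroring how sctp_protocol
     * above is registered by sctp_v4_add_protocol().
     */
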
@@ -1130,16 +1101,16 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
        return 1;
 }
 
-static inline int init_sctp_mibs(void)
+static inline int init_sctp_mibs(struct net *net)
 {
-       return snmp_mib_init((void __percpu **)sctp_statistics,
+       return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
                             sizeof(struct sctp_mib),
                             __alignof__(struct sctp_mib));
 }
 
-static inline void cleanup_sctp_mibs(void)
+static inline void cleanup_sctp_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)sctp_statistics);
+       snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
@@ -1194,6 +1165,143 @@ static void sctp_v4_del_protocol(void)
        unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
 }
 
+static int sctp_net_init(struct net *net)
+{
+       int status;
+
+       /*
+        * 14. Suggested SCTP Protocol Parameter Values
+        */
+       /* The following protocol parameters are RECOMMENDED:  */
+       /* RTO.Initial              - 3  seconds */
+       net->sctp.rto_initial                   = SCTP_RTO_INITIAL;
+       /* RTO.Min                  - 1  second */
+       net->sctp.rto_min                       = SCTP_RTO_MIN;
+       /* RTO.Max                 -  60 seconds */
+       net->sctp.rto_max                       = SCTP_RTO_MAX;
+       /* RTO.Alpha                - 1/8 */
+       net->sctp.rto_alpha                     = SCTP_RTO_ALPHA;
+       /* RTO.Beta                 - 1/4 */
+       net->sctp.rto_beta                      = SCTP_RTO_BETA;
+
+       /* Valid.Cookie.Life        - 60  seconds */
+       net->sctp.valid_cookie_life             = SCTP_DEFAULT_COOKIE_LIFE;
+
+       /* Whether Cookie Preservative is enabled(1) or not(0) */
+       net->sctp.cookie_preserve_enable        = 1;
+
+       /* Max.Burst                - 4 */
+       net->sctp.max_burst                     = SCTP_DEFAULT_MAX_BURST;
+
+       /* Association.Max.Retrans  - 10 attempts
+        * Path.Max.Retrans         - 5  attempts (per destination address)
+        * Max.Init.Retransmits     - 8  attempts
+        */
+       net->sctp.max_retrans_association       = 10;
+       net->sctp.max_retrans_path              = 5;
+       net->sctp.max_retrans_init              = 8;
+
+       /* Sendbuffer growth        - do per-socket accounting */
+       net->sctp.sndbuf_policy                 = 0;
+
+       /* Rcvbuffer growth         - do per-socket accounting */
+       net->sctp.rcvbuf_policy                 = 0;
+
+       /* HB.interval              - 30 seconds */
+       net->sctp.hb_interval                   = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+
+       /* delayed SACK timeout */
+       net->sctp.sack_timeout                  = SCTP_DEFAULT_TIMEOUT_SACK;
+
+       /* Disable ADDIP by default. */
+       net->sctp.addip_enable = 0;
+       net->sctp.addip_noauth = 0;
+       net->sctp.default_auto_asconf = 0;
+
+       /* Enable PR-SCTP by default. */
+       net->sctp.prsctp_enable = 1;
+
+       /* Disable AUTH by default. */
+       net->sctp.auth_enable = 0;
+
+       /* Set SCOPE policy to enabled */
+       net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
+       /* Set the default rwnd update threshold */
+       net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
+       /* Initialize maximum autoclose timeout. */
+       net->sctp.max_autoclose         = INT_MAX / HZ;
+
+       status = sctp_sysctl_net_register(net);
+       if (status)
+               goto err_sysctl_register;
+
+       /* Allocate and initialise sctp mibs.  */
+       status = init_sctp_mibs(net);
+       if (status)
+               goto err_init_mibs;
+
+       /* Initialize proc fs directory.  */
+       status = sctp_proc_init(net);
+       if (status)
+               goto err_init_proc;
+
+       sctp_dbg_objcnt_init(net);
+
+       /* Initialize the control inode/socket for handling OOTB packets.  */
+       if ((status = sctp_ctl_sock_init(net))) {
+               pr_err("Failed to initialize the SCTP control sock\n");
+               goto err_ctl_sock_init;
+       }
+
+       /* Initialize the local address list. */
+       INIT_LIST_HEAD(&net->sctp.local_addr_list);
+       spin_lock_init(&net->sctp.local_addr_lock);
+       sctp_get_local_addr_list(net);
+
+       /* Initialize the address event list */
+       INIT_LIST_HEAD(&net->sctp.addr_waitq);
+       INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
+       spin_lock_init(&net->sctp.addr_wq_lock);
+       net->sctp.addr_wq_timer.expires = 0;
+       setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler,
+                   (unsigned long)net);
+
+       return 0;
+
+err_ctl_sock_init:
+       sctp_dbg_objcnt_exit(net);
+       sctp_proc_exit(net);
+err_init_proc:
+       cleanup_sctp_mibs(net);
+err_init_mibs:
+       sctp_sysctl_net_unregister(net);
+err_sysctl_register:
+       return status;
+}
+
+static void sctp_net_exit(struct net *net)
+{
+       /* Free the local address list */
+       sctp_free_addr_wq(net);
+       sctp_free_local_addr_list(net);
+
+       /* Free the control endpoint.  */
+       inet_ctl_sock_destroy(net->sctp.ctl_sock);
+
+       sctp_dbg_objcnt_exit(net);
+
+       sctp_proc_exit(net);
+       cleanup_sctp_mibs(net);
+       sctp_sysctl_net_unregister(net);
+}
+
+static struct pernet_operations sctp_net_ops = {
+       .init = sctp_net_init,
+       .exit = sctp_net_exit,
+};
+
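
register_pernet_subsys() is what makes the block above run for every network namespace: .init is called for init_net and for each namespace created afterwards, .exit as each one is torn down. A minimal, self-contained sketch of the same pattern (hypothetical names; the real sctp_net_ops does the full protocol bring-up shown above):

    static int example_net_init(struct net *net)
    {
            pr_debug("netns %p: set up per-namespace state\n", net);
            return 0;       /* a non-zero return aborts creation of this netns */
    }

    static void example_net_exit(struct net *net)
    {
            pr_debug("netns %p: tear down per-namespace state\n", net);
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
    };

    /* from subsystem init:  err = register_pernet_subsys(&example_net_ops);
     * from subsystem exit:  unregister_pernet_subsys(&example_net_ops);
     */
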
 /* Initialize the universe into something sensible.  */
 SCTP_STATIC __init int sctp_init(void)
 {
@@ -1224,62 +1332,9 @@ SCTP_STATIC __init int sctp_init(void)
        if (!sctp_chunk_cachep)
                goto err_chunk_cachep;
 
-       /* Allocate and initialise sctp mibs.  */
-       status = init_sctp_mibs();
+       status = percpu_counter_init(&sctp_sockets_allocated, 0);
        if (status)
-               goto err_init_mibs;
-
-       /* Initialize proc fs directory.  */
-       status = sctp_proc_init();
-       if (status)
-               goto err_init_proc;
-
-       /* Initialize object count debugging.  */
-       sctp_dbg_objcnt_init();
-
-       /*
-        * 14. Suggested SCTP Protocol Parameter Values
-        */
-       /* The following protocol parameters are RECOMMENDED:  */
-       /* RTO.Initial              - 3  seconds */
-       sctp_rto_initial                = SCTP_RTO_INITIAL;
-       /* RTO.Min                  - 1  second */
-       sctp_rto_min                    = SCTP_RTO_MIN;
-       /* RTO.Max                 -  60 seconds */
-       sctp_rto_max                    = SCTP_RTO_MAX;
-       /* RTO.Alpha                - 1/8 */
-       sctp_rto_alpha                  = SCTP_RTO_ALPHA;
-       /* RTO.Beta                 - 1/4 */
-       sctp_rto_beta                   = SCTP_RTO_BETA;
-
-       /* Valid.Cookie.Life        - 60  seconds */
-       sctp_valid_cookie_life          = SCTP_DEFAULT_COOKIE_LIFE;
-
-       /* Whether Cookie Preservative is enabled(1) or not(0) */
-       sctp_cookie_preserve_enable     = 1;
-
-       /* Max.Burst                - 4 */
-       sctp_max_burst                  = SCTP_DEFAULT_MAX_BURST;
-
-       /* Association.Max.Retrans  - 10 attempts
-        * Path.Max.Retrans         - 5  attempts (per destination address)
-        * Max.Init.Retransmits     - 8  attempts
-        */
-       sctp_max_retrans_association    = 10;
-       sctp_max_retrans_path           = 5;
-       sctp_max_retrans_init           = 8;
-
-       /* Sendbuffer growth        - do per-socket accounting */
-       sctp_sndbuf_policy              = 0;
-
-       /* Rcvbuffer growth         - do per-socket accounting */
-       sctp_rcvbuf_policy              = 0;
-
-       /* HB.interval              - 30 seconds */
-       sctp_hb_interval                = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
-
-       /* delayed SACK timeout */
-       sctp_sack_timeout               = SCTP_DEFAULT_TIMEOUT_SACK;
+               goto err_percpu_counter_init;
 
        /* Implementation specific variables. */
 
@@ -1287,9 +1342,6 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
-       /* Initialize maximum autoclose timeout. */
-       sctp_max_autoclose              = INT_MAX / HZ;
-
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
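
sctp_sockets_allocated stays global rather than per-net, but it is now a percpu_counter, whose allocation can fail and therefore needs the error label added above. A small usage sketch (hypothetical counter; kernels of this vintage take no gfp argument in percpu_counter_init, later ones do):

    static struct percpu_counter example_allocated;

    static int example_counter_demo(void)
    {
            int err = percpu_counter_init(&example_allocated, 0);
            if (err)
                    return err;

            percpu_counter_inc(&example_allocated);         /* per-cpu fast path */
            pr_debug("approx %lld, exact %lld\n",
                     (long long)percpu_counter_read(&example_allocated),
                     (long long)percpu_counter_sum(&example_allocated));
            percpu_counter_dec(&example_allocated);

            percpu_counter_destroy(&example_allocated);
            return 0;
    }
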
 
@@ -1376,41 +1428,12 @@ SCTP_STATIC __init int sctp_init(void)
        pr_info("Hash tables configured (established %d bind %d)\n",
                sctp_assoc_hashsize, sctp_port_hashsize);
 
-       /* Disable ADDIP by default. */
-       sctp_addip_enable = 0;
-       sctp_addip_noauth = 0;
-       sctp_default_auto_asconf = 0;
-
-       /* Enable PR-SCTP by default. */
-       sctp_prsctp_enable = 1;
-
-       /* Disable AUTH by default. */
-       sctp_auth_enable = 0;
-
-       /* Set SCOPE policy to enabled */
-       sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
-
-       /* Set the default rwnd update threshold */
-       sctp_rwnd_upd_shift             = SCTP_DEFAULT_RWND_SHIFT;
-
        sctp_sysctl_register();
 
        INIT_LIST_HEAD(&sctp_address_families);
        sctp_v4_pf_init();
        sctp_v6_pf_init();
 
-       /* Initialize the local address list. */
-       INIT_LIST_HEAD(&sctp_local_addr_list);
-       spin_lock_init(&sctp_local_addr_lock);
-       sctp_get_local_addr_list();
-
-       /* Initialize the address event list */
-       INIT_LIST_HEAD(&sctp_addr_waitq);
-       INIT_LIST_HEAD(&sctp_auto_asconf_splist);
-       spin_lock_init(&sctp_addr_wq_lock);
-       sctp_addr_wq_timer.expires = 0;
-       setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
-
        status = sctp_v4_protosw_init();
 
        if (status)
@@ -1420,11 +1443,9 @@ SCTP_STATIC __init int sctp_init(void)
        if (status)
                goto err_v6_protosw_init;
 
-       /* Initialize the control inode/socket for handling OOTB packets.  */
-       if ((status = sctp_ctl_sock_init())) {
-               pr_err("Failed to initialize the SCTP control sock\n");
-               goto err_ctl_sock_init;
-       }
+       status = register_pernet_subsys(&sctp_net_ops);
+       if (status)
+               goto err_register_pernet_subsys;
 
        status = sctp_v4_add_protocol();
        if (status)
@@ -1441,13 +1462,12 @@ out:
 err_v6_add_protocol:
        sctp_v4_del_protocol();
 err_add_protocol:
-       inet_ctl_sock_destroy(sctp_ctl_sock);
-err_ctl_sock_init:
+       unregister_pernet_subsys(&sctp_net_ops);
+err_register_pernet_subsys:
        sctp_v6_protosw_exit();
 err_v6_protosw_init:
        sctp_v4_protosw_exit();
 err_protosw_init:
-       sctp_free_local_addr_list();
        sctp_v4_pf_exit();
        sctp_v6_pf_exit();
        sctp_sysctl_unregister();
@@ -1461,11 +1481,8 @@ err_ehash_alloc:
                   get_order(sctp_assoc_hashsize *
                             sizeof(struct sctp_hashbucket)));
 err_ahash_alloc:
-       sctp_dbg_objcnt_exit();
-       sctp_proc_exit();
-err_init_proc:
-       cleanup_sctp_mibs();
-err_init_mibs:
+       percpu_counter_destroy(&sctp_sockets_allocated);
+err_percpu_counter_init:
        kmem_cache_destroy(sctp_chunk_cachep);
 err_chunk_cachep:
        kmem_cache_destroy(sctp_bucket_cachep);
@@ -1482,18 +1499,13 @@ SCTP_STATIC __exit void sctp_exit(void)
        /* Unregister with inet6/inet layers. */
        sctp_v6_del_protocol();
        sctp_v4_del_protocol();
-       sctp_free_addr_wq();
 
-       /* Free the control endpoint.  */
-       inet_ctl_sock_destroy(sctp_ctl_sock);
+       unregister_pernet_subsys(&sctp_net_ops);
 
        /* Free protosw registrations */
        sctp_v6_protosw_exit();
        sctp_v4_protosw_exit();
 
-       /* Free the local address list.  */
-       sctp_free_local_addr_list();
-
        /* Unregister with socket layer. */
        sctp_v6_pf_exit();
        sctp_v4_pf_exit();
@@ -1508,9 +1520,7 @@ SCTP_STATIC __exit void sctp_exit(void)
                   get_order(sctp_port_hashsize *
                             sizeof(struct sctp_bind_hashbucket)));
 
-       sctp_dbg_objcnt_exit();
-       sctp_proc_exit();
-       cleanup_sctp_mibs();
+       percpu_counter_destroy(&sctp_sockets_allocated);
 
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
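
Both sctp_net_init() above and sctp_init() unwind partial failures with the usual kernel goto ladder: each label undoes exactly the steps that had already succeeded, in reverse order, so the exit path stays a mirror image of the setup path. Schematically (hypothetical helpers):

    static int example_init(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto err_a;
            err = step_b();
            if (err)
                    goto err_b;             /* undo A only */
            err = step_c();
            if (err)
                    goto err_c;             /* undo B, then A */
            return 0;

    err_c:
            undo_b();
    err_b:
            undo_a();
    err_a:
            return err;
    }
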
 
index 479a70e..fbe1636 100644
@@ -198,6 +198,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                             const struct sctp_bind_addr *bp,
                             gfp_t gfp, int vparam_len)
 {
+       struct net *net = sock_net(asoc->base.sk);
        sctp_inithdr_t init;
        union sctp_params addrs;
        size_t chunksize;
@@ -237,7 +238,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
        chunksize += sizeof(ecap_param);
 
-       if (sctp_prsctp_enable)
+       if (net->sctp.prsctp_enable)
                chunksize += sizeof(prsctp_param);
 
        /* ADDIP: Section 4.2.7:
@@ -245,7 +246,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
         *  the ASCONF,the ASCONF-ACK, and the AUTH  chunks in its INIT and
         *  INIT-ACK parameters.
         */
-       if (sctp_addip_enable) {
+       if (net->sctp.addip_enable) {
                extensions[num_ext] = SCTP_CID_ASCONF;
                extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
                num_ext += 2;
@@ -257,7 +258,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        chunksize += vparam_len;
 
        /* Account for AUTH related parameters */
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                /* Add random parameter length*/
                chunksize += sizeof(asoc->c.auth_random);
 
@@ -331,7 +332,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                sctp_addto_param(retval, num_ext, extensions);
        }
 
-       if (sctp_prsctp_enable)
+       if (net->sctp.prsctp_enable)
                sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
 
        if (sp->adaptation_ind) {
@@ -342,7 +343,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        }
 
        /* Add SCTP-AUTH chunks to the parameter list */
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
                                 asoc->c.auth_random);
                if (auth_hmacs)
@@ -1940,7 +1941,7 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
        return 0;
 }
 
-static int sctp_verify_ext_param(union sctp_params param)
+static int sctp_verify_ext_param(struct net *net, union sctp_params param)
 {
        __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
        int have_auth = 0;
@@ -1964,10 +1965,10 @@ static int sctp_verify_ext_param(union sctp_params param)
         * only if ADD-IP is turned on and we are not in backward-compatible
         * mode.
         */
-       if (sctp_addip_noauth)
+       if (net->sctp.addip_noauth)
                return 1;
 
-       if (sctp_addip_enable && !have_auth && have_asconf)
+       if (net->sctp.addip_enable && !have_auth && have_asconf)
                return 0;
 
        return 1;
@@ -1976,13 +1977,14 @@ static int sctp_verify_ext_param(union sctp_params param)
 static void sctp_process_ext_param(struct sctp_association *asoc,
                                    union sctp_params param)
 {
+       struct net *net = sock_net(asoc->base.sk);
        __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
        int i;
 
        for (i = 0; i < num_ext; i++) {
                switch (param.ext->chunks[i]) {
                    case SCTP_CID_FWD_TSN:
-                           if (sctp_prsctp_enable &&
+                           if (net->sctp.prsctp_enable &&
                                !asoc->peer.prsctp_capable)
                                    asoc->peer.prsctp_capable = 1;
                            break;
@@ -1990,12 +1992,12 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
                            /* if the peer reports AUTH, assume that he
                             * supports AUTH.
                             */
-                           if (sctp_auth_enable)
+                           if (net->sctp.auth_enable)
                                    asoc->peer.auth_capable = 1;
                            break;
                    case SCTP_CID_ASCONF:
                    case SCTP_CID_ASCONF_ACK:
-                           if (sctp_addip_enable)
+                           if (net->sctp.addip_enable)
                                    asoc->peer.asconf_capable = 1;
                            break;
                    default:
@@ -2081,7 +2083,8 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
  *     SCTP_IERROR_ERROR - stop processing, trigger an ERROR
  *     SCTP_IERROR_NO_ERROR - continue with the chunk
  */
-static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+static sctp_ierror_t sctp_verify_param(struct net *net,
+                                       const struct sctp_association *asoc,
                                        union sctp_params param,
                                        sctp_cid_t cid,
                                        struct sctp_chunk *chunk,
@@ -2110,12 +2113,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_SUPPORTED_EXT:
-               if (!sctp_verify_ext_param(param))
+               if (!sctp_verify_ext_param(net, param))
                        return SCTP_IERROR_ABORT;
                break;
 
        case SCTP_PARAM_SET_PRIMARY:
-               if (sctp_addip_enable)
+               if (net->sctp.addip_enable)
                        break;
                goto fallthrough;
 
@@ -2126,12 +2129,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_FWD_TSN_SUPPORT:
-               if (sctp_prsctp_enable)
+               if (net->sctp.prsctp_enable)
                        break;
                goto fallthrough;
 
        case SCTP_PARAM_RANDOM:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                /* SCTP-AUTH: Section 6.1
@@ -2148,7 +2151,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_CHUNKS:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                /* SCTP-AUTH: Section 3.2
@@ -2164,7 +2167,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_HMAC_ALGO:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                hmacs = (struct sctp_hmac_algo_param *)param.p;
@@ -2198,7 +2201,7 @@ fallthrough:
 }
 
 /* Verify the INIT packet before we process it.  */
-int sctp_verify_init(const struct sctp_association *asoc,
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
                     sctp_cid_t cid,
                     sctp_init_chunk_t *peer_init,
                     struct sctp_chunk *chunk,
@@ -2245,7 +2248,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
        /* Verify all the variable length parameters */
        sctp_walk_params(param, peer_init, init_hdr.params) {
 
-               result = sctp_verify_param(asoc, param, cid, chunk, errp);
+               result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
                switch (result) {
                    case SCTP_IERROR_ABORT:
                    case SCTP_IERROR_NOMEM:
@@ -2270,6 +2273,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
                      const union sctp_addr *peer_addr,
                      sctp_init_chunk_t *peer_init, gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        union sctp_params param;
        struct sctp_transport *transport;
        struct list_head *pos, *temp;
@@ -2326,7 +2330,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * also give us an option to silently ignore the packet, which
         * is what we'll do here.
         */
-       if (!sctp_addip_noauth &&
+       if (!net->sctp.addip_noauth &&
             (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
                asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
                                                  SCTP_PARAM_DEL_IP |
@@ -2466,6 +2470,7 @@ static int sctp_process_param(struct sctp_association *asoc,
                              const union sctp_addr *peer_addr,
                              gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        union sctp_addr addr;
        int i;
        __u16 sat;
@@ -2494,13 +2499,13 @@ do_addr_param:
                af = sctp_get_af_specific(param_type2af(param.p->type));
                af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
                scope = sctp_scope(peer_addr);
-               if (sctp_in_scope(&addr, scope))
+               if (sctp_in_scope(net, &addr, scope))
                        if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
                                return 0;
                break;
 
        case SCTP_PARAM_COOKIE_PRESERVATIVE:
-               if (!sctp_cookie_preserve_enable)
+               if (!net->sctp.cookie_preserve_enable)
                        break;
 
                stale = ntohl(param.life->lifespan_increment);
@@ -2580,7 +2585,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_SET_PRIMARY:
-               if (!sctp_addip_enable)
+               if (!net->sctp.addip_enable)
                        goto fall_through;
 
                addr_param = param.v + sizeof(sctp_addip_param_t);
@@ -2607,7 +2612,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_FWD_TSN_SUPPORT:
-               if (sctp_prsctp_enable) {
+               if (net->sctp.prsctp_enable) {
                        asoc->peer.prsctp_capable = 1;
                        break;
                }
@@ -2615,7 +2620,7 @@ do_addr_param:
                goto fall_through;
 
        case SCTP_PARAM_RANDOM:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                /* Save peer's random parameter */
@@ -2628,7 +2633,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_HMAC_ALGO:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                /* Save peer's HMAC list */
@@ -2644,7 +2649,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_CHUNKS:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                asoc->peer.peer_chunks = kmemdup(param.p,
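
Throughout sm_make_chunk.c the namespace is recovered from the association's base socket with sock_net() instead of consulting the old globals; any helper that needs a per-net tunable can follow the same idiom (hypothetical helper, assuming the netns_sctp fields introduced above):

    static bool example_auth_enabled(const struct sctp_association *asoc)
    {
            struct net *net = sock_net(asoc->base.sk);

            /* per-namespace tunable that replaces the global sctp_auth_enable */
            return net->sctp.auth_enable;
    }
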
index fe99628..bcfebb9 100644
@@ -251,6 +251,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
        int error;
        struct sctp_transport *transport = (struct sctp_transport *) peer;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
 
        /* Check whether a task is in the sock.  */
 
@@ -271,7 +272,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                goto out_unlock;
 
        /* Run through the state machine.  */
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
                           asoc->state,
                           asoc->ep, asoc,
@@ -291,6 +292,7 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
                                        sctp_event_timeout_t timeout_type)
 {
+       struct net *net = sock_net(asoc->base.sk);
        int error = 0;
 
        sctp_bh_lock_sock(asoc->base.sk);
@@ -312,7 +314,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
                goto out_unlock;
 
        /* Run through the state machine.  */
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(timeout_type),
                           asoc->state, asoc->ep, asoc,
                           (void *)timeout_type, GFP_ATOMIC);
@@ -371,6 +373,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        int error = 0;
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
 
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
@@ -388,7 +391,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        if (transport->dead)
                goto out_unlock;
 
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
                           transport, GFP_ATOMIC);
@@ -408,6 +411,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
        
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
@@ -426,7 +430,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
        if (asoc->base.dead)
                goto out_unlock;
 
-       sctp_do_sm(SCTP_EVENT_T_OTHER,
+       sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
@@ -753,8 +757,10 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
        int err = 0;
 
        if (sctp_outq_sack(&asoc->outqueue, sackh)) {
+               struct net *net = sock_net(asoc->base.sk);
+
                /* There are no more TSNs awaiting SACK.  */
-               err = sctp_do_sm(SCTP_EVENT_T_OTHER,
+               err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                                 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
                                 asoc->state, asoc->ep, asoc, NULL,
                                 GFP_ATOMIC);
@@ -1042,6 +1048,8 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
  */
 static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
+
        /* Send the next asconf chunk from the addip chunk
         * queue.
         */
@@ -1053,7 +1061,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 
                /* Hold the chunk until an ASCONF_ACK is received. */
                sctp_chunk_hold(asconf);
-               if (sctp_primitive_ASCONF(asoc, asconf))
+               if (sctp_primitive_ASCONF(net, asoc, asconf))
                        sctp_chunk_free(asconf);
                else
                        asoc->addip_last_asconf = asconf;
@@ -1089,7 +1097,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
  * If you want to understand all of lksctp, this is a
  * good place to start.
  */
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
               sctp_state_t state,
               struct sctp_endpoint *ep,
               struct sctp_association *asoc,
@@ -1110,12 +1118,12 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
        /* Look up the state function, run it, and then process the
         * side effects.  These three steps are the heart of lksctp.
         */
-       state_fn = sctp_sm_lookup_event(event_type, state, subtype);
+       state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
 
        sctp_init_cmd_seq(&commands);
 
        DEBUG_PRE;
-       status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
+       status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
        DEBUG_POST;
 
        error = sctp_side_effects(event_type, subtype, state,
index 9fca103..094813b 100644
@@ -66,7 +66,8 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  struct sctp_chunk *chunk,
                                  const void *payload,
@@ -74,36 +75,43 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 static int sctp_eat_data(const struct sctp_association *asoc,
                         struct sctp_chunk *chunk,
                         sctp_cmd_seq_t *commands);
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+                                            const struct sctp_association *asoc,
                                             const struct sctp_chunk *chunk);
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       sctp_cmd_seq_t *commands,
                                       struct sctp_chunk *err_chunk);
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+                                                const struct sctp_endpoint *ep,
                                                 const struct sctp_association *asoc,
                                                 const sctp_subtype_t type,
                                                 void *arg,
                                                 sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+                                            const struct sctp_endpoint *ep,
                                             const struct sctp_association *asoc,
                                             const sctp_subtype_t type,
                                             void *arg,
                                             sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
                                        sctp_cmd_seq_t *commands);
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+                                          sctp_cmd_seq_t *commands,
                                           __be16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport);
 
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
@@ -112,6 +120,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
                                     const size_t paylen);
 
 static sctp_disposition_t sctp_sf_violation_chunklen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -119,6 +128,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -126,6 +136,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_ctsn(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -133,18 +144,21 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_chunk(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
                                     sctp_cmd_seq_t *commands);
 
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    struct sctp_chunk *chunk);
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -204,7 +218,8 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_4_C(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  const sctp_subtype_t type,
                                  void *arg,
@@ -214,7 +229,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* RFC 2960 6.10 Bundling
         *
@@ -222,11 +237,11 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* RFC 2960 10.2 SCTP-to-ULP
@@ -259,8 +274,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -289,7 +304,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -313,21 +329,21 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * with an INIT chunk that is bundled with other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-               SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+               SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
        }
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
        if (chunk->sctp_hdr->vtag != 0)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT chunk has a valid length.
         * Normally, this would cause an ABORT with a Protocol Violation
@@ -335,7 +351,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * just discard the packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the INIT is coming toward a closing socket, we'll send back
         * an ABORT.  Essentially, this catches the race of INIT being
@@ -344,18 +360,18 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * can treat this OOTB
         */
        if (sctp_sstate(ep->base.sk, CLOSING))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
                /* This chunk contains fatal error. It is to be discarded.
                 * Send an ABORT, with causes if there is any.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -366,13 +382,13 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                return SCTP_DISPOSITION_CONSUME;
                        } else {
                                return SCTP_DISPOSITION_NOMEM;
                        }
                } else {
-                       return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+                       return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
                                                    commands);
                }
        }
@@ -484,7 +500,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -496,25 +513,25 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* 6.10 Bundling
         * An endpoint MUST NOT bundle INIT, INIT ACK or
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT-ACK chunk has a valid length */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
 
@@ -526,7 +543,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                 * the association.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -537,7 +554,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                error = SCTP_ERROR_INV_PARAM;
                        }
                }
@@ -554,10 +571,10 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                 * was malformed.
                 */
                if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
                                                asoc, chunk->transport);
        }
 
@@ -633,7 +650,8 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type, void *arg,
                                      sctp_cmd_seq_t *commands)
@@ -650,9 +668,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-               SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+               SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the COOKIE_ECHO chunk has a valid length.
@@ -661,7 +679,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
         * in sctp_unpack_cookie().
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the endpoint is not listening or if the number of associations
         * on the TCP-style socket exceed the max backlog, respond with an
@@ -670,7 +688,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        sk = ep->base.sk;
        if (!sctp_sstate(sk, LISTENING) ||
            (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
         * are in good shape.
@@ -703,13 +721,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                        goto nomem;
 
                case -SCTP_IERROR_STALE_COOKIE:
-                       sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+                       sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
                                                   err_chk_p);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                case -SCTP_IERROR_BAD_SIG:
                default:
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -756,14 +774,14 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
                auth.transport = chunk->transport;
 
-               ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
+               ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
 
                /* We can now safely free the auth_chunk clone */
                kfree_skb(chunk->auth_chunk);
 
                if (ret != SCTP_IERROR_NO_ERROR) {
                        sctp_association_free(new_asoc);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -804,8 +822,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
        if (new_asoc->autoclose)
@@ -856,7 +874,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type, void *arg,
                                      sctp_cmd_seq_t *commands)
@@ -865,13 +884,13 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Verify that the chunk length for the COOKIE-ACK is OK.
         * If we don't do this, any bundled chunks may be junked.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Reset init error count upon receipt of COOKIE-ACK,
@@ -892,8 +911,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
        if (asoc->autoclose)
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
@@ -958,7 +977,8 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
 }
 
 /* Generate a HEARTBEAT packet on the given transport.  */
-sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -972,8 +992,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_DELETE_TCB;
        }
 
@@ -1028,7 +1048,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -1039,11 +1060,11 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
        size_t paylen = 0;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the HEARTBEAT chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* 8.3 The receiver of the HEARTBEAT should immediately
@@ -1095,7 +1116,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1108,12 +1130,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
        unsigned long max_interval;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the HEARTBEAT-ACK chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
                                            sizeof(sctp_sender_hb_info_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
@@ -1171,7 +1193,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 /* Helper function to send out an abort for the restart
  * condition.
  */
-static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
+static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
                                      struct sctp_chunk *init,
                                      sctp_cmd_seq_t *commands)
 {
@@ -1197,18 +1219,18 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
        errhdr->length = htons(len);
 
        /* Assign to the control socket. */
-       ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+       ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
        /* Association is NULL since this may be a restart attack and we
         * want to send back the attacker's vtag.
         */
-       pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len);
+       pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
 
        if (!pkt)
                goto out;
        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
 
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
        /* Discard the rest of the inbound packet. */
        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
@@ -1240,6 +1262,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                                       struct sctp_chunk *init,
                                       sctp_cmd_seq_t *commands)
 {
+       struct net *net = sock_net(new_asoc->base.sk);
        struct sctp_transport *new_addr;
        int ret = 1;
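
sctp_sf_check_restart_addrs() is a static helper rather than a table-driven state function, so instead of growing a parameter it derives the namespace from the association's socket, as the added line above does. The same idiom as a hypothetical stand-alone helper (the name is invented here):

    /* Hypothetical helper: recover the owning net namespace from an
     * association when no struct net is passed in explicitly.
     */
    static inline struct net *sctp_assoc_net(const struct sctp_association *asoc)
    {
            return sock_net(asoc->base.sk);
    }
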
 
@@ -1258,7 +1281,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                            transports) {
                if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
                                        &new_addr->ipaddr)) {
-                       sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+                       sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
                                                   commands);
                        ret = 0;
                        break;
@@ -1358,6 +1381,7 @@ static char sctp_tietags_compare(struct sctp_association *new_asoc,
  * chunk handling.
  */
 static sctp_disposition_t sctp_sf_do_unexpected_init(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -1382,20 +1406,20 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
         * with an INIT chunk that is bundled with other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
        if (chunk->sctp_hdr->vtag != 0)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT chunk has a valid length.
         * In this case, we generate a protocol violation since we have
         * an association established.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -1405,14 +1429,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
                /* This chunk contains a fatal error. It is to be discarded.
                 * Send an ABORT, with causes if there are any.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -1421,14 +1445,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                retval = SCTP_DISPOSITION_CONSUME;
                        } else {
                                retval = SCTP_DISPOSITION_NOMEM;
                        }
                        goto cleanup;
                } else {
-                       return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+                       return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
                                                    commands);
                }
        }
@@ -1570,7 +1594,8 @@ cleanup:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -1579,7 +1604,7 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
        /* Call helper to do the real work for both simultaneous and
         * duplicate INIT chunk handling.
         */
-       return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+       return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -1623,7 +1648,8 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1632,7 +1658,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
        /* Call helper to do the real work for both simultaneous and
         * duplicate INIT chunk handling.
         */
-       return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+       return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 
@@ -1645,7 +1671,8 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
  * An unexpected INIT ACK usually indicates the processing of an old or
  * duplicated INIT chunk.
 */
-sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
+                                           const struct sctp_endpoint *ep,
                                            const struct sctp_association *asoc,
                                            const sctp_subtype_t type,
                                            void *arg, sctp_cmd_seq_t *commands)
@@ -1653,10 +1680,10 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
        /* Per the above section, we'll discard the chunk if we have an
         * endpoint.  If this is an OOTB INIT-ACK, treat it as such.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-               return sctp_sf_ootb(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
+               return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
        else
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 }
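
The global sctp_get_ctl_sock() accessor is gone; each namespace now carries its own control socket in net->sctp.ctl_sock, and the OOTB test above compares the receiving endpoint against the endpoint owned by that socket. Restated as a hypothetical predicate (the field access is taken from the hunk above, the helper name is invented):

    /* Hypothetical predicate: is this endpoint the namespace's SCTP
     * control endpoint, i.e. was the INIT-ACK received out of the blue?
     */
    static inline bool sctp_ep_is_ctl_ep(struct net *net,
                                         const struct sctp_endpoint *ep)
    {
            return ep == sctp_sk(net->sctp.ctl_sock)->ep;
    }
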
 
 /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
@@ -1664,7 +1691,8 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
  * Section 5.2.4
  *  A)  In this case, the peer may have restarted.
  */
-static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1700,7 +1728,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
         * its peer.
         */
        if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
-               disposition = sctp_sf_do_9_2_reshutack(ep, asoc,
+               disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
                                SCTP_ST_CHUNK(chunk->chunk_hdr->type),
                                chunk, commands);
                if (SCTP_DISPOSITION_NOMEM == disposition)
@@ -1763,7 +1791,8 @@ nomem:
  *      after responding to the local endpoint's INIT
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1784,7 +1813,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
        repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1833,7 +1862,8 @@ nomem:
  *     but a new tag of its own.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1854,7 +1884,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
  *    enter the ESTABLISHED state, if it has not already done so.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1876,7 +1907,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
                sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                                SCTP_STATE(SCTP_STATE_ESTABLISHED));
-               SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
                sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
                                SCTP_NULL());
 
@@ -1948,7 +1979,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1967,7 +1999,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
         * done later.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
@@ -2001,12 +2033,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
                        goto nomem;
 
                case -SCTP_IERROR_STALE_COOKIE:
-                       sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+                       sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
                                                   err_chk_p);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                case -SCTP_IERROR_BAD_SIG:
                default:
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -2017,27 +2049,27 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 
        switch (action) {
        case 'A': /* Association restart. */
-               retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'B': /* Collision case B. */
-               retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'C': /* Collision case C. */
-               retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'D': /* Collision case D. */
-               retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        default: /* Discard packet for all others. */
-               retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                break;
        }
 
@@ -2063,6 +2095,7 @@ nomem:
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_pending_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -2072,7 +2105,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2085,7 +2118,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2094,9 +2127,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2104,7 +2137,8 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2113,7 +2147,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2126,7 +2160,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2135,7 +2169,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Stop the T2-shutdown timer. */
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2145,7 +2179,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2154,6 +2188,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -2163,7 +2198,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
        /* The same T2 timer, so we should be able to use
         * a common function with the SHUTDOWN-SENT state.
         */
-       return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2180,7 +2215,8 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2190,13 +2226,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
        sctp_errhdr_t *err;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ERROR chunk has a valid length.
         * The parameter walking depends on this as well.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Process the error here */
@@ -2206,7 +2242,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
         */
        sctp_walk_errors(err, chunk->chunk_hdr) {
                if (SCTP_ERROR_STALE_COOKIE == err->cause)
-                       return sctp_sf_do_5_2_6_stale(ep, asoc, type,
+                       return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
                                                        arg, commands);
        }
 
@@ -2215,7 +2251,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
         * we are discarding the packet, there should be no adverse
         * effects.
         */
-       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2243,7 +2279,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+                                                const struct sctp_endpoint *ep,
                                                 const struct sctp_association *asoc,
                                                 const sctp_subtype_t type,
                                                 void *arg,
@@ -2365,7 +2402,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2374,7 +2412,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2387,7 +2425,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2396,12 +2434,13 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2418,7 +2457,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
                sctp_errhdr_t *err;
                sctp_walk_errors(err, chunk->chunk_hdr);
                if ((void *)err != (void *)chunk->chunk_end)
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
        }
@@ -2426,8 +2465,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
        /* ASSOC_FAILED will DELETE_TCB. */
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return SCTP_DISPOSITION_ABORT;
 }
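
The SCTP_INC_STATS()/SCTP_DEC_STATS() calls converted throughout this file now carry the namespace so the MIB counters are accounted per net. The call sites imply macro definitions along these lines (the per-net field name and the SNMP helpers used are assumptions, not shown in this diff):

    /* Illustrative only -- assumed per-namespace counter plumbing. */
    #define SCTP_INC_STATS(net, field) \
            SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
    #define SCTP_DEC_STATS(net, field) \
            SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
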
@@ -2437,7 +2476,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
  *
  * See sctp_sf_do_9_1_abort() above.
  */
-sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -2448,7 +2488,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
        __be16 error = SCTP_ERROR_NO_ERROR;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2461,27 +2501,28 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* See if we have an error cause code in the chunk.  */
        len = ntohs(chunk->chunk_hdr->length);
        if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-       return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+       return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
                                      chunk->transport);
 }
 
 /*
  * Process an incoming ICMP as an ABORT.  (COOKIE-WAIT state)
  */
-sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
                                        sctp_cmd_seq_t *commands)
 {
-       return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+       return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
                                      ENOPROTOOPT, asoc,
                                      (struct sctp_transport *)arg);
 }
@@ -2489,7 +2530,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
 /*
  * Process an ABORT.  (COOKIE-ECHOED state)
  */
-sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
+                                              const struct sctp_endpoint *ep,
                                               const struct sctp_association *asoc,
                                               const sctp_subtype_t type,
                                               void *arg,
@@ -2498,7 +2540,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
        /* There is a single T1 timer, so we should be able to use
         * a common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2506,7 +2548,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
  *
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+                                          sctp_cmd_seq_t *commands,
                                           __be16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport)
@@ -2514,7 +2557,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
        SCTP_DEBUG_PRINTK("ABORT received (INIT).\n");
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
@@ -2557,7 +2600,8 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -2570,12 +2614,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk,
                                      sizeof(struct sctp_shutdown_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Convert the elaborate header.  */
@@ -2595,7 +2639,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
         * When a peer sends a SHUTDOWN, SCTP delivers this notification to
@@ -2619,7 +2663,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
        disposition = SCTP_DISPOSITION_CONSUME;
 
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
                                                          arg, commands);
        }
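
The "Cumulative TSN Ack beyond the max tsn currently sent" checks in sctp_sf_do_9_2_shutdown() above and in the SHUTDOWN-ACK/SACK hunks below rely on TSN_lt(), a wrap-around-safe comparison of 32-bit TSNs. It is conventionally implemented with serial-number arithmetic, roughly as follows (illustration only, not the kernel's definition):

    /* Wrap-safe "a precedes b" for 32-bit TSNs (RFC 1982 style
     * serial-number arithmetic); illustrative re-implementation.
     */
    static inline int tsn_before(__u32 a, __u32 b)
    {
            return (__s32)(a - b) < 0;
    }
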
 
@@ -2645,7 +2689,8 @@ out:
  * The Cumulative TSN Ack of the received SHUTDOWN chunk
  * MUST be processed.
  */
-sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -2656,12 +2701,12 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk,
                                      sizeof(struct sctp_shutdown_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
@@ -2678,7 +2723,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* verify, by checking the Cumulative TSN Ack field of the
         * chunk, that all its outstanding DATA chunks have been
@@ -2697,7 +2742,8 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
  * that belong to this association, it should discard the INIT chunk and
  * retransmit the SHUTDOWN ACK chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -2708,7 +2754,7 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
 
        /* Make sure that the chunk has a valid length */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Since we are not going to really process this INIT, there
@@ -2760,7 +2806,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type,
                                      void *arg,
@@ -2771,10 +2818,10 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
        u32 lowest_tsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        cwr = (sctp_cwrhdr_t *) chunk->skb->data;
@@ -2815,7 +2862,8 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecne(struct net *net,
+                                  const struct sctp_endpoint *ep,
                                   const struct sctp_association *asoc,
                                   const sctp_subtype_t type,
                                   void *arg,
@@ -2825,10 +2873,10 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        ecne = (sctp_ecnehdr_t *) chunk->skb->data;
@@ -2871,7 +2919,8 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2884,11 +2933,11 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        error = sctp_eat_data(asoc, chunk, commands );
@@ -2897,16 +2946,16 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
                break;
        case SCTP_IERROR_HIGH_TSN:
        case SCTP_IERROR_BAD_STREAM:
-               SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+               SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
                goto discard_noforce;
        case SCTP_IERROR_DUP_TSN:
        case SCTP_IERROR_IGNORE_TSN:
-               SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+               SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
                goto discard_force;
        case SCTP_IERROR_NO_DATA:
                goto consume;
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+               return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                        (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
        default:
                BUG();
@@ -2992,7 +3041,8 @@ consume:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -3004,11 +3054,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        error = sctp_eat_data(asoc, chunk, commands );
@@ -3022,7 +3072,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
        case SCTP_IERROR_NO_DATA:
                goto consume;
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+               return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                        (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
        default:
                BUG();
@@ -3082,7 +3132,8 @@ consume:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3093,18 +3144,18 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Pull the SACK chunk from the data buffer */
        sackh = sctp_sm_pull_sack(chunk);
        /* Was this a bogus SACK? */
        if (!sackh)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        chunk->subh.sack_hdr = sackh;
        ctsn = ntohl(sackh->cum_tsn_ack);
 
@@ -3125,7 +3176,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* Return this SACK for further processing.  */
        sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
@@ -3154,7 +3205,8 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3164,7 +3216,7 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_chunk *abort;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an ABORT. The T bit will be set if the asoc
@@ -3188,9 +3240,9 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
        }
 
@@ -3205,7 +3257,8 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_operr_notify(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3215,15 +3268,15 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
        sctp_errhdr_t *err;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ERROR chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        sctp_walk_errors(err, chunk->chunk_hdr);
        if ((void *)err != (void *)chunk->chunk_end)
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err, commands);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
@@ -3242,7 +3295,8 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3253,11 +3307,11 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* 10.2 H) SHUTDOWN COMPLETE notification
         *
@@ -3290,8 +3344,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
 
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
 
        /* ...and remove all record of the association. */
@@ -3324,7 +3378,8 @@ nomem:
  *    receiver of the OOTB packet shall discard the OOTB packet and take
  *    no further action.
  */
-sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ootb(struct net *net,
+                               const struct sctp_endpoint *ep,
                                const struct sctp_association *asoc,
                                const sctp_subtype_t type,
                                void *arg,
@@ -3338,13 +3393,13 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
        int ootb_shut_ack = 0;
        int ootb_cookie_ack = 0;
 
-       SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
        ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
        do {
                /* Report violation if the chunk is less than minimal */
                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
                /* Now that we know we at least have a chunk header,
@@ -3359,7 +3414,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                 *   sending an ABORT of its own.
                 */
                if (SCTP_CID_ABORT == ch->type)
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
                 * or a COOKIE ACK the SCTP Packet should be silently
@@ -3381,18 +3436,18 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                /* Report violation if chunk len overflows */
                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
                if (ch_end > skb_tail_pointer(skb))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
                ch = (sctp_chunkhdr_t *) ch_end;
        } while (ch_end < skb_tail_pointer(skb));
 
        if (ootb_shut_ack)
-               return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+               return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
        else if (ootb_cookie_ack)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        else
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -3416,7 +3471,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+                                            const struct sctp_endpoint *ep,
                                             const struct sctp_association *asoc,
                                             const sctp_subtype_t type,
                                             void *arg,
@@ -3426,7 +3482,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_chunk *shut;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make a SHUTDOWN_COMPLETE.
@@ -3450,19 +3506,19 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
                /* If the chunk length is invalid, we don't want to process
                 * the rest of the packet.
                 */
                if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* We need to discard the rest of the packet to prevent
                 * potential bombing attacks from additional bundled chunks.
                 * This is documented in SCTP Threats ID.
                 */
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        return SCTP_DISPOSITION_NOMEM;
@@ -3479,7 +3535,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
  *   chunks. --piggy ]
  *
  */
-sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type,
                                      void *arg,
@@ -3489,7 +3546,7 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
 
        /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Although we do have an association in this case, it corresponds
@@ -3497,13 +3554,14 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
         * packet and the state function that handles OOTB SHUTDOWN_ACK is
         * called with a NULL association.
         */
-       SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
-       return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands);
+       return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
 }
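
The SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES) form used here is the recurring theme of this patch: MIB counters are charged to the namespace that received the packet instead of a single global table. Below is a minimal model of that idea, assuming a flat per-namespace counter array; the kernel's mibs are per-CPU SNMP counters, so this is only a sketch.

/* Hedged model of per-namespace MIB counters; not the kernel's SNMP mibs. */
enum sctp_mib_field {
	MIB_OUTOFBLUES,
	MIB_OUTCTRLCHUNKS,
	MIB_IN_PKT_DISCARDS,
	MIB_MAX
};

struct netns_sctp_model {
	unsigned long mibs[MIB_MAX];	/* one counter table per namespace */
};

struct net_model {
	struct netns_sctp_model sctp;
};

static inline void sctp_inc_stats_model(struct net_model *net,
					enum sctp_mib_field field)
{
	net->sctp.mibs[field]++;	/* only this namespace's view changes */
}

With counters owned by the namespace, two containers running SCTP see independent statistics, which is the point of threading net through every handler that bumps a MIB.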
 
 /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.  */
-sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type, void *arg,
                                     sctp_cmd_seq_t *commands)
@@ -3519,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* ADD-IP: Section 4.1.1
@@ -3528,12 +3586,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
         * is received unauthenticated it MUST be silently discarded as
         * described in [I-D.ietf-tsvwg-sctp-auth].
         */
-       if (!sctp_addip_noauth && !chunk->auth)
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+       if (!net->sctp.addip_noauth && !chunk->auth)
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ASCONF ADDIP chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
@@ -3542,7 +3600,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        addr_param = (union sctp_addr_param *)hdr->params;
        length = ntohs(addr_param->p.length);
        if (length < sizeof(sctp_paramhdr_t))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)addr_param, commands);
 
        /* Verify the ASCONF chunk before processing it. */
@@ -3550,7 +3608,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
                            (sctp_paramhdr_t *)((void *)addr_param + length),
                            (void *)chunk->chunk_end,
                            &err_param))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err_param, commands);
 
        /* ADDIP 5.2 E1) Compare the value of the serial number to the value
@@ -3630,7 +3688,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
  * When building TLV parameters for the ASCONF Chunk that will add or
  * delete IP addresses the D0 to D13 rules should be applied:
  */
-sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+                                        const struct sctp_endpoint *ep,
                                         const struct sctp_association *asoc,
                                         const sctp_subtype_t type, void *arg,
                                         sctp_cmd_seq_t *commands)
@@ -3645,7 +3704,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(asconf_ack, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* ADD-IP, Section 4.1.2:
@@ -3654,12 +3713,12 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
         * is received unauthenticated it MUST be silently discarded as
         * described in [I-D.ietf-tsvwg-sctp-auth].
         */
-       if (!sctp_addip_noauth && !asconf_ack->auth)
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+       if (!net->sctp.addip_noauth && !asconf_ack->auth)
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ADDIP chunk has a valid length.  */
        if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
@@ -3670,7 +3729,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
            (sctp_paramhdr_t *)addip_hdr->params,
            (void *)asconf_ack->chunk_end,
            &err_param))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)err_param, commands);
 
        if (last_asconf) {
@@ -3705,8 +3764,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -3739,8 +3798,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -3761,7 +3820,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -3776,12 +3836,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the FORWARD_TSN chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3828,6 +3888,7 @@ discard_noforce:
 }
 
 sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -3843,12 +3904,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the FORWARD_TSN chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3915,7 +3976,8 @@ gen_shutdown:
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    struct sctp_chunk *chunk)
@@ -3988,7 +4050,8 @@ nomem:
        return SCTP_IERROR_NOMEM;
 }
 
-sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_auth(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -4001,21 +4064,21 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
 
        /* Make sure that the peer is AUTH capable */
        if (!asoc->peer.auth_capable)
-               return sctp_sf_unk_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the AUTH chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-       error = sctp_sf_authenticate(ep, asoc, type, chunk);
+       error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
        switch (error) {
        case SCTP_IERROR_AUTH_BAD_HMAC:
                /* Generate the ERROR chunk and discard the rest
@@ -4032,10 +4095,10 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
                /* Fall Through */
        case SCTP_IERROR_AUTH_BAD_KEYID:
        case SCTP_IERROR_BAD_SIG:
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        case SCTP_IERROR_NOMEM:
@@ -4084,7 +4147,8 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -4097,20 +4161,20 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
        SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
 
        if (!sctp_vtag_verify(unk_chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the chunk has a valid length.
         * Since we don't know the chunk type, we use a general
         * chunkhdr structure to make a comparison.
         */
        if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        switch (type.chunk & SCTP_CID_ACTION_MASK) {
        case SCTP_CID_ACTION_DISCARD:
                /* Discard the packet.  */
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                break;
        case SCTP_CID_ACTION_DISCARD_ERR:
                /* Generate an ERROR chunk as response. */
@@ -4125,7 +4189,7 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
                }
 
                /* Discard the packet.  */
-               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
                break;
        case SCTP_CID_ACTION_SKIP:
@@ -4167,7 +4231,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
+                                        const struct sctp_endpoint *ep,
                                         const struct sctp_association *asoc,
                                         const sctp_subtype_t type,
                                         void *arg,
@@ -4180,7 +4245,7 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
         * chunkhdr structure to make a comparison.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
@@ -4205,13 +4270,14 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_pdiscard(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
                                    sctp_cmd_seq_t *commands)
 {
-       SCTP_INC_STATS(SCTP_MIB_IN_PKT_DISCARDS);
+       SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 
        return SCTP_DISPOSITION_CONSUME;
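
sctp_sf_pdiscard() also shows the side-effect idiom used by every handler in this file: the state function does not touch the packet or timers directly, it appends SCTP_CMD_* verbs to the commands sequence and lets the state-machine driver execute them afterwards. A toy model of that queue follows, with invented names and a fixed capacity chosen only for illustration.

/* Hedged sketch of the command-sequence idiom; names and sizes are invented. */
#include <stdio.h>

enum sm_verb { CMD_DISCARD_PACKET, CMD_SEND_PKT, CMD_TIMER_STOP, CMD_DELETE_TCB };

struct cmd_seq {
	enum sm_verb cmds[8];
	int n;
};

static int add_cmd(struct cmd_seq *seq, enum sm_verb verb)
{
	if (seq->n >= 8)
		return -1;		/* a real implementation must handle overflow */
	seq->cmds[seq->n++] = verb;
	return 0;
}

static void run_cmds(const struct cmd_seq *seq)
{
	for (int i = 0; i < seq->n; i++)
		printf("executing side effect %d\n", seq->cmds[i]);	/* driver replays the verbs */
}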
@@ -4232,7 +4298,8 @@ sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
  * We simply tag the chunk as a violation.  The state machine will log
  * the violation and continue.
  */
-sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_violation(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -4242,7 +4309,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
 
        /* Make sure that the chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        return SCTP_DISPOSITION_VIOLATION;
@@ -4252,6 +4319,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
  * Common function to handle a protocol violation.
  */
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
@@ -4302,7 +4370,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
                }
 
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
                if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
                        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -4316,10 +4384,10 @@ static sctp_disposition_t sctp_sf_abort_violation(
                                        SCTP_ERROR(ECONNABORTED));
                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                        SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                }
        } else {
-               packet = sctp_ootb_pkt_new(asoc, chunk);
+               packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
                if (!packet)
                        goto nomem_pkt;
@@ -4334,13 +4402,13 @@ static sctp_disposition_t sctp_sf_abort_violation(
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                        SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+       sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
        return SCTP_DISPOSITION_ABORT;
 
 nomem_pkt:
@@ -4369,6 +4437,7 @@ nomem:
  * Generate an  ABORT chunk and terminate the association.
  */
 static sctp_disposition_t sctp_sf_violation_chunklen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4377,7 +4446,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 {
        static const char err_str[]="The following chunk had invalid length:";
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -4388,6 +4457,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
  * the length is considered as invalid.
  */
 static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4407,17 +4477,17 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
                goto nomem;
 
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
                        SCTP_ERROR(ECONNABORTED));
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+       sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
        return SCTP_DISPOSITION_ABORT;
 nomem:
        return SCTP_DISPOSITION_NOMEM;
@@ -4430,6 +4500,7 @@ nomem:
  * error code.
  */
 static sctp_disposition_t sctp_sf_violation_ctsn(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4438,7 +4509,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
 {
        static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -4449,6 +4520,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
  * on the path and we may not want to continue this communication.
  */
 static sctp_disposition_t sctp_sf_violation_chunk(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4458,9 +4530,9 @@ static sctp_disposition_t sctp_sf_violation_chunk(
        static const char err_str[]="The following chunk violates protocol:";
 
        if (!asoc)
-               return sctp_sf_violation(ep, asoc, type, arg, commands);
+               return sctp_sf_violation(net, ep, asoc, type, arg, commands);
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 /***************************************************************************
@@ -4523,7 +4595,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
  *
  * The return value is a disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -4634,7 +4707,8 @@ nomem:
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -4673,6 +4747,7 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4694,7 +4769,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
 
        disposition = SCTP_DISPOSITION_CONSUME;
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
                                                            arg, commands);
        }
        return disposition;
@@ -4728,6 +4803,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4759,14 +4835,15 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return retval;
 }
 
 /* We tried an illegal operation on an association which is closed.  */
-sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_closed(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -4779,7 +4856,8 @@ sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
 /* We tried an illegal operation on an association which is shutting
  * down.
  */
-sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
+                                         const struct sctp_endpoint *ep,
                                          const struct sctp_association *asoc,
                                          const sctp_subtype_t type,
                                          void *arg,
@@ -4805,6 +4883,7 @@ sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4817,7 +4896,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -4839,6 +4918,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4847,7 +4927,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4865,6 +4945,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4884,7 +4965,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
        /* Even if we can't send the ABORT due to low memory delete the
         * TCB.  This is a departure from our typical NOMEM handling.
@@ -4914,6 +4995,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4923,7 +5005,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4939,6 +5021,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4949,7 +5032,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4965,6 +5048,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4979,7 +5063,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4995,6 +5079,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5004,7 +5089,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
        /* The same T2 timer, so we should be able to use
         * common function with the SHUTDOWN-SENT state.
         */
-       return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -5030,6 +5115,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
  *   association on which a heartbeat should be issued.
  */
 sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
+                                       struct net *net,
                                        const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
@@ -5061,7 +5147,8 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
  * When an endpoint has an ASCONF signaled change to be sent to the
  * remote endpoint it should do A1 to A9
  */
-sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5082,6 +5169,7 @@ sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
  * The return value is the disposition of the primitive.
  */
 sctp_disposition_t sctp_sf_ignore_primitive(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5103,6 +5191,7 @@ sctp_disposition_t sctp_sf_ignore_primitive(
  * retransmit, the stack will immediately send up this notification.
  */
 sctp_disposition_t sctp_sf_do_no_pending_tsn(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5134,6 +5223,7 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn(
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5203,6 +5293,7 @@ nomem:
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5221,11 +5312,11 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
         */
        if (chunk) {
                if (!sctp_vtag_verify(chunk, asoc))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* Make sure that the SHUTDOWN chunk has a valid length. */
                if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                          commands);
        }
 
@@ -5273,7 +5364,8 @@ nomem:
  *
  * The return value is the disposition of the event.
  */
-sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ignore_other(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5298,7 +5390,8 @@ sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5306,7 +5399,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 {
        struct sctp_transport *transport = arg;
 
-       SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
                if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
@@ -5327,8 +5420,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
                        /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
-                       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+                       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                        return SCTP_DISPOSITION_DELETE_TCB;
                }
        }
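
The T3-rtx branch above captures the failure policy: count the expiry against the namespace, and once the association's overall error count reaches its retransmission limit, fail the association instead of retransmitting again. A condensed model of that decision, using simplified stand-in types rather than the kernel structures:

/* Hedged sketch of the T3-rtx give-up policy; types are models only. */
enum rtx_action { RTX_RETRANSMIT, RTX_FAIL_ASSOCIATION };

struct assoc_model {
	unsigned int overall_error_count;
	unsigned int max_retrans;
};

static enum rtx_action t3_rtx_expired(struct assoc_model *asoc,
				      unsigned long *t3_expired_counter)
{
	(*t3_expired_counter)++;		/* charged to the namespace in the kernel */

	if (asoc->overall_error_count >= asoc->max_retrans)
		return RTX_FAIL_ASSOCIATION;	/* abort: the peer looks unreachable */

	return RTX_RETRANSMIT;			/* otherwise back off and resend (not shown) */
}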
@@ -5384,13 +5477,14 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
  * allow. However, an SCTP transmitter MUST NOT be more aggressive than
  * the following algorithms allow.
  */
-sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
                                       sctp_cmd_seq_t *commands)
 {
-       SCTP_INC_STATS(SCTP_MIB_DELAY_SACK_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
        return SCTP_DISPOSITION_CONSUME;
 }
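
For context on the delayed-SACK timer handled here: RFC 4960 section 6.2 lets acknowledgements be delayed and bundled, but a pending delay must be cut short when the timer fires, which is what SCTP_CMD_GEN_SACK with SCTP_FORCE() requests. The toy model below shows the two triggers; it is not the kernel's SACK machinery.

/* Hedged sketch of delayed-SACK triggers per RFC 4960 6.2; not kernel code. */
#include <stdbool.h>

struct sack_state {
	unsigned int data_since_sack;	/* DATA chunks seen since the last SACK */
};

/* Called for every accepted DATA chunk; true means generate a SACK now. */
static bool on_data_chunk(struct sack_state *s)
{
	if (++s->data_since_sack >= 2) {
		s->data_since_sack = 0;
		return true;
	}
	return false;			/* wait: the delay timer is running */
}

/* Called when the delayed-SACK timer expires (the case handled above). */
static bool on_sack_timer(struct sack_state *s)
{
	s->data_since_sack = 0;
	return true;			/* force a SACK, as SCTP_FORCE() does */
}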
@@ -5414,7 +5508,8 @@ sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5425,7 +5520,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
        int attempts = asoc->init_err_counter + 1;
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
-       SCTP_INC_STATS(SCTP_MIB_T1_INIT_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
 
        if (attempts <= asoc->max_init_attempts) {
                bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
@@ -5475,7 +5570,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5485,7 +5581,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
        int attempts = asoc->init_err_counter + 1;
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
-       SCTP_INC_STATS(SCTP_MIB_T1_COOKIE_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
 
        if (attempts <= asoc->max_init_attempts) {
                repl = sctp_make_cookie_echo(asoc, NULL);
@@ -5523,7 +5619,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
  * the T2-Shutdown timer,  giving its peer ample opportunity to transmit
  * all of its queued DATA chunks that have not yet been sent.
  */
-sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5532,7 +5629,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
        struct sctp_chunk *reply = NULL;
 
        SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
-       SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
 
        ((struct sctp_association *)asoc)->shutdown_retries++;
 
@@ -5542,8 +5639,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
                /* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_DELETE_TCB;
        }
 
@@ -5592,6 +5689,7 @@ nomem:
  * If the T4 RTO timer expires the endpoint should do B1 to B5
  */
 sctp_disposition_t sctp_sf_t4_timer_expire(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5601,7 +5699,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
        struct sctp_chunk *chunk = asoc->addip_last_asconf;
        struct sctp_transport *transport = chunk->transport;
 
-       SCTP_INC_STATS(SCTP_MIB_T4_RTO_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
 
        /* ADDIP 4.1 B1) Increment the error counters and perform path failure
         * detection on the appropriate destination address as defined in
@@ -5626,8 +5724,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
                                SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -5662,7 +5760,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
  * At the expiration of this timer the sender SHOULD abort the association
  * by sending an ABORT chunk.
  */
-sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5671,7 +5770,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
        struct sctp_chunk *reply = NULL;
 
        SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
-       SCTP_INC_STATS(SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
 
        reply = sctp_make_abort(asoc, NULL, 0);
        if (!reply)
@@ -5683,8 +5782,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return SCTP_DISPOSITION_DELETE_TCB;
 nomem:
@@ -5697,6 +5796,7 @@ nomem:
  * the user.  So this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
  */
 sctp_disposition_t sctp_sf_autoclose_timer_expire(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5705,7 +5805,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
 {
        int disposition;
 
-       SCTP_INC_STATS(SCTP_MIB_AUTOCLOSE_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
 
        /* From 9.2 Shutdown of an Association
         * Upon receipt of the SHUTDOWN primitive from its upper
@@ -5720,7 +5820,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
 
        disposition = SCTP_DISPOSITION_CONSUME;
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
                                                            arg, commands);
        }
        return disposition;
@@ -5738,7 +5838,8 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_not_impl(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -5755,7 +5856,8 @@ sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_bug(struct net *net,
+                              const struct sctp_endpoint *ep,
                               const struct sctp_association *asoc,
                               const sctp_subtype_t type,
                               void *arg,
@@ -5775,7 +5877,8 @@ sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5817,7 +5920,8 @@ static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
 /* Create an ABORT packet to be sent as a response, with the specified
  * error causes.
  */
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  struct sctp_chunk *chunk,
                                  const void *payload,
@@ -5826,7 +5930,7 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
        struct sctp_chunk *abort;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an ABORT.
@@ -5858,7 +5962,8 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 }
 
 /* Allocate a packet for responding in the OOTB conditions.  */
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+                                            const struct sctp_association *asoc,
                                             const struct sctp_chunk *chunk)
 {
        struct sctp_packet *packet;
@@ -5911,7 +6016,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
        }
 
        /* Make a transport for the bucket, Eliza... */
-       transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
+       transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
        if (!transport)
                goto nomem;
 
@@ -5919,7 +6024,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
         * the source address.
         */
        sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
-                            sctp_sk(sctp_get_ctl_sock()));
+                            sctp_sk(net->sctp.ctl_sock));
 
        packet = sctp_packet_init(&transport->packet, transport, sport, dport);
        packet = sctp_packet_config(packet, vtag, 0);
@@ -5937,7 +6042,8 @@ void sctp_ootb_pkt_free(struct sctp_packet *packet)
 }
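
sctp_ootb_pkt_new() shows the other half of the namespace conversion: a helper that used to reach for a module-wide control socket now takes the namespace and uses the socket owned by it (net->sctp.ctl_sock). A before/after sketch of that lookup, with illustrative model structs:

/* Hedged sketch of global vs per-namespace control-socket lookup. */
struct socket_model { int fd; };

struct netns_sctp_ctl_model {
	struct socket_model *ctl_sock;	/* one control socket per namespace */
};

struct netns_model {
	struct netns_sctp_ctl_model sctp;
};

/* Before: a file-scope global shared by every namespace. */
static struct socket_model *global_ctl_sock;

static struct socket_model *get_ctl_sock_old(void)
{
	return global_ctl_sock;
}

/* After: the caller states which namespace it is working on. */
static struct socket_model *get_ctl_sock_new(struct netns_model *net)
{
	return net->sctp.ctl_sock;
}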
 
 /* Send a stale cookie error when an invalid COOKIE ECHO chunk is found  */
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       sctp_cmd_seq_t *commands,
@@ -5946,7 +6052,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
 
        if (err_chunk) {
-               packet = sctp_ootb_pkt_new(asoc, chunk);
+               packet = sctp_ootb_pkt_new(net, asoc, chunk);
                if (packet) {
                        struct sctp_signed_cookie *cookie;
 
@@ -5959,7 +6065,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
                        sctp_packet_append_chunk(packet, err_chunk);
                        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                        SCTP_PACKET(packet));
-                       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                } else
                        sctp_chunk_free (err_chunk);
        }
@@ -5979,6 +6085,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
        __u32 tsn;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
        u16 ssn;
        u16 sid;
        u8 ordered = 0;
@@ -6109,8 +6216,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_DATA));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_IERROR_NO_DATA;
        }
 
@@ -6120,9 +6227,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * if we renege and the chunk arrives again.
         */
        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-               SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
        else {
-               SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
                ordered = 1;
        }
 
index 7c211a7..84d98d8 100644 (file)
@@ -59,7 +59,8 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES];
 static const sctp_sm_table_entry_t
 timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES];
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+                                                           sctp_cid_t cid,
                                                            sctp_state_t state);
 
 
@@ -82,13 +83,14 @@ static const sctp_sm_table_entry_t bug = {
        rtn;                                                            \
 })
 
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net,
+                                                 sctp_event_t event_type,
                                                  sctp_state_t state,
                                                  sctp_subtype_t event_subtype)
 {
        switch (event_type) {
        case SCTP_EVENT_T_CHUNK:
-               return sctp_chunk_event_lookup(event_subtype.chunk, state);
+               return sctp_chunk_event_lookup(net, event_subtype.chunk, state);
        case SCTP_EVENT_T_TIMEOUT:
                return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
                                 timeout_event_table);
@@ -906,7 +908,8 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
        TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+                                                           sctp_cid_t cid,
                                                            sctp_state_t state)
 {
        if (state > SCTP_STATE_MAX)
@@ -915,12 +918,12 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
        if (cid <= SCTP_CID_BASE_MAX)
                return &chunk_event_table[cid][state];
 
-       if (sctp_prsctp_enable) {
+       if (net->sctp.prsctp_enable) {
                if (cid == SCTP_CID_FWD_TSN)
                        return &prsctp_chunk_event_table[0][state];
        }
 
-       if (sctp_addip_enable) {
+       if (net->sctp.addip_enable) {
                if (cid == SCTP_CID_ASCONF)
                        return &addip_chunk_event_table[0][state];
 
@@ -928,7 +931,7 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
                        return &addip_chunk_event_table[1][state];
        }
 
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                if (cid == SCTP_CID_AUTH)
                        return &auth_chunk_event_table[0][state];
        }
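
The lookup above now consults this namespace's feature flags (prsctp_enable, addip_enable, auth_enable) before dispatching the optional chunk types, where a module-wide toggle was consulted before. A small sketch of that gating, with illustrative table names and the chunk IDs taken from the SCTP extension RFCs:

/* Hedged sketch of per-namespace feature gating; table names are placeholders. */
#include <stddef.h>

struct sctp_features {
	int prsctp_enable;
	int addip_enable;
	int auth_enable;
};

enum { CID_FWD_TSN = 192, CID_ASCONF = 193, CID_AUTH = 15 };	/* RFC 3758/5061/4895 */

static const char *lookup_optional_chunk(const struct sctp_features *f, int cid)
{
	if (f->prsctp_enable && cid == CID_FWD_TSN)
		return "prsctp_chunk_event_table";
	if (f->addip_enable && cid == CID_ASCONF)
		return "addip_chunk_event_table";
	if (f->auth_enable && cid == CID_AUTH)
		return "auth_chunk_event_table";
	return NULL;		/* fall through to the unknown-chunk handler */
}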
index 5e25981..d37d24f 100644 (file)
@@ -427,6 +427,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 static int sctp_send_asconf(struct sctp_association *asoc,
                            struct sctp_chunk *chunk)
 {
+       struct net      *net = sock_net(asoc->base.sk);
        int             retval = 0;
 
        /* If there is an outstanding ASCONF chunk, queue it for later
@@ -439,7 +440,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
 
        /* Hold the chunk until an ASCONF_ACK is received. */
        sctp_chunk_hold(chunk);
-       retval = sctp_primitive_ASCONF(asoc, chunk);
+       retval = sctp_primitive_ASCONF(net, asoc, chunk);
        if (retval)
                sctp_chunk_free(chunk);
        else
@@ -515,6 +516,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
                                   struct sockaddr      *addrs,
                                   int                  addrcnt)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock                *sp;
        struct sctp_endpoint            *ep;
        struct sctp_association         *asoc;
@@ -529,7 +531,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
        int                             i;
        int                             retval = 0;
 
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return retval;
 
        sp = sctp_sk(sk);
@@ -717,6 +719,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
                                   struct sockaddr      *addrs,
                                   int                  addrcnt)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock        *sp;
        struct sctp_endpoint    *ep;
        struct sctp_association *asoc;
@@ -732,7 +735,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
        int                     stored = 0;
 
        chunk = NULL;
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return retval;
 
        sp = sctp_sk(sk);
@@ -1050,6 +1053,7 @@ static int __sctp_connect(struct sock* sk,
                          int addrs_size,
                          sctp_assoc_t *assoc_id)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
        struct sctp_association *asoc = NULL;
@@ -1200,7 +1204,7 @@ static int __sctp_connect(struct sock* sk,
                        goto out_free;
        }
 
-       err = sctp_primitive_ASSOCIATE(asoc, NULL);
+       err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
        if (err < 0) {
                goto out_free;
        }
@@ -1458,6 +1462,7 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
  */
 SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
        struct list_head *pos, *temp;
@@ -1499,9 +1504,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
                        if (chunk)
-                               sctp_primitive_ABORT(asoc, chunk);
+                               sctp_primitive_ABORT(net, asoc, chunk);
                } else
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
        }
 
        /* On a TCP-style socket, block for at most linger_time if set. */
@@ -1569,6 +1574,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
 SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                             struct msghdr *msg, size_t msg_len)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
        struct sctp_association *new_asoc=NULL, *asoc=NULL;
@@ -1714,7 +1720,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (sinfo_flags & SCTP_EOF) {
                        SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
                                          asoc);
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
                        err = 0;
                        goto out_unlock;
                }
@@ -1727,7 +1733,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                        }
 
                        SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
-                       sctp_primitive_ABORT(asoc, chunk);
+                       sctp_primitive_ABORT(net, asoc, chunk);
                        err = 0;
                        goto out_unlock;
                }
@@ -1900,7 +1906,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        /* Auto-connect, if we aren't connected already. */
        if (sctp_state(asoc, CLOSED)) {
-               err = sctp_primitive_ASSOCIATE(asoc, NULL);
+               err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
                if (err < 0)
                        goto out_free;
                SCTP_DEBUG_PRINTK("We associated primitively.\n");
@@ -1928,7 +1934,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
         * works that way today.  Keep it that way or this
         * breaks.
         */
-       err = sctp_primitive_SEND(asoc, datamsg);
+       err = sctp_primitive_SEND(net, asoc, datamsg);
        /* Did the lower layer accept the chunk? */
        if (err)
                sctp_datamsg_free(datamsg);
@@ -2320,7 +2326,9 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        int error;
 
        if (params->spp_flags & SPP_HB_DEMAND && trans) {
-               error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans);
+               struct net *net = sock_net(trans->asoc->base.sk);
+
+               error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
                if (error)
                        return error;
        }
@@ -3033,6 +3041,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
                                             unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock        *sp;
        struct sctp_association *asoc = NULL;
        struct sctp_setpeerprim prim;
@@ -3042,7 +3051,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
 
        sp = sctp_sk(sk);
 
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return -EPERM;
 
        if (optlen != sizeof(struct sctp_setpeerprim))
@@ -3279,9 +3288,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunk val;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authchunk))
@@ -3311,11 +3321,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmacalgo *hmacs;
        u32 idents;
        int err;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen < sizeof(struct sctp_hmacalgo))
@@ -3348,11 +3359,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
                                    char __user *optval,
                                    unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkey *authkey;
        struct sctp_association *asoc;
        int ret;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen <= sizeof(struct sctp_authkey))
@@ -3389,10 +3401,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authkeyid))
@@ -3417,10 +3430,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
                                   char __user *optval,
                                   unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authkeyid))
@@ -3471,7 +3485,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
                sp->do_auto_asconf = 0;
        } else if (val && !sp->do_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
-                   &sctp_auto_asconf_splist);
+                   &sock_net(sk)->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        }
        return 0;
@@ -3843,6 +3857,7 @@ out:
  */
 SCTP_STATIC int sctp_init_sock(struct sock *sk)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_sock *sp;
 
@@ -3872,7 +3887,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        sp->default_timetolive = 0;
 
        sp->default_rcv_context = 0;
-       sp->max_burst = sctp_max_burst;
+       sp->max_burst = net->sctp.max_burst;
 
        /* Initialize default setup parameters. These parameters
         * can be modified with the SCTP_INITMSG socket option or
@@ -3880,24 +3895,24 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         */
        sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
        sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
-       sp->initmsg.sinit_max_attempts   = sctp_max_retrans_init;
-       sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
+       sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
+       sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
 
        /* Initialize default RTO related parameters.  These parameters can
         * be modified for with the SCTP_RTOINFO socket option.
         */
-       sp->rtoinfo.srto_initial = sctp_rto_initial;
-       sp->rtoinfo.srto_max     = sctp_rto_max;
-       sp->rtoinfo.srto_min     = sctp_rto_min;
+       sp->rtoinfo.srto_initial = net->sctp.rto_initial;
+       sp->rtoinfo.srto_max     = net->sctp.rto_max;
+       sp->rtoinfo.srto_min     = net->sctp.rto_min;
 
        /* Initialize default association related parameters. These parameters
         * can be modified with the SCTP_ASSOCINFO socket option.
         */
-       sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association;
+       sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
        sp->assocparams.sasoc_number_peer_destinations = 0;
        sp->assocparams.sasoc_peer_rwnd = 0;
        sp->assocparams.sasoc_local_rwnd = 0;
-       sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
+       sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
 
        /* Initialize default event subscriptions. By default, all the
         * options are off.
@@ -3907,10 +3922,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        /* Default Peer Address Parameters.  These defaults can
         * be modified via SCTP_PEER_ADDR_PARAMS
         */
-       sp->hbinterval  = sctp_hb_interval;
-       sp->pathmaxrxt  = sctp_max_retrans_path;
+       sp->hbinterval  = net->sctp.hb_interval;
+       sp->pathmaxrxt  = net->sctp.max_retrans_path;
        sp->pathmtu     = 0; // allow default discovery
-       sp->sackdelay   = sctp_sack_timeout;
+       sp->sackdelay   = net->sctp.sack_timeout;
        sp->sackfreq    = 2;
        sp->param_flags = SPP_HB_ENABLE |
                          SPP_PMTUD_ENABLE |
@@ -3961,10 +3976,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 
        local_bh_disable();
        percpu_counter_inc(&sctp_sockets_allocated);
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       if (sctp_default_auto_asconf) {
+       sock_prot_inuse_add(net, sk->sk_prot, 1);
+       if (net->sctp.default_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
-                   &sctp_auto_asconf_splist);
+                   &net->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        } else
                sp->do_auto_asconf = 0;
@@ -4011,6 +4026,7 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
  */
 SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
 
@@ -4022,7 +4038,7 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
                if (!list_empty(&ep->asocs)) {
                        asoc = list_entry(ep->asocs.next,
                                          struct sctp_association, asocs);
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
                }
        }
 }
@@ -4653,9 +4669,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
        union sctp_addr temp;
        int cnt = 0;
        int addrlen;
+       struct net *net = sock_net(sk);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+       list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
 
@@ -5299,12 +5316,13 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmacalgo  __user *p = (void __user *)optval;
        struct sctp_hmac_algo_param *hmacs;
        __u16 data_len = 0;
        u32 num_idents;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
@@ -5328,10 +5346,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 static int sctp_getsockopt_active_key(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authkeyid))
@@ -5360,6 +5379,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunks __user *p = (void __user *)optval;
        struct sctp_authchunks val;
        struct sctp_association *asoc;
@@ -5367,7 +5387,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        u32    num_chunks = 0;
        char __user *to;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authchunks))
@@ -5403,6 +5423,7 @@ num:
 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunks __user *p = (void __user *)optval;
        struct sctp_authchunks val;
        struct sctp_association *asoc;
@@ -5410,7 +5431,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        u32    num_chunks = 0;
        char __user *to;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authchunks))
@@ -5769,7 +5790,7 @@ static void sctp_unhash(struct sock *sk)
  * a fastreuse flag (FIXME: NPI ipg).
  */
 static struct sctp_bind_bucket *sctp_bucket_create(
-       struct sctp_bind_hashbucket *head, unsigned short snum);
+       struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
 
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
@@ -5799,11 +5820,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                                rover = low;
                        if (inet_is_reserved_local_port(rover))
                                continue;
-                       index = sctp_phashfn(rover);
+                       index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
                        sctp_spin_lock(&head->lock);
                        sctp_for_each_hentry(pp, node, &head->chain)
-                               if (pp->port == rover)
+                               if ((pp->port == rover) &&
+                                   net_eq(sock_net(sk), pp->net))
                                        goto next;
                        break;
                next:
@@ -5827,10 +5849,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                 * to the port number (snum) - we detect that with the
                 * port iterator, pp being NULL.
                 */
-               head = &sctp_port_hashtable[sctp_phashfn(snum)];
+               head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
                sctp_spin_lock(&head->lock);
                sctp_for_each_hentry(pp, node, &head->chain) {
-                       if (pp->port == snum)
+                       if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
                                goto pp_found;
                }
        }
@@ -5881,7 +5903,7 @@ pp_found:
 pp_not_found:
        /* If there was a hash table miss, create a new port.  */
        ret = 1;
-       if (!pp && !(pp = sctp_bucket_create(head, snum)))
+       if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
                goto fail_unlock;
 
        /* In either case (hit or miss), make sure fastreuse is 1 only
@@ -6113,7 +6135,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
  ********************************************************************/
 
 static struct sctp_bind_bucket *sctp_bucket_create(
-       struct sctp_bind_hashbucket *head, unsigned short snum)
+       struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
 {
        struct sctp_bind_bucket *pp;
 
@@ -6123,6 +6145,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
                pp->port = snum;
                pp->fastreuse = 0;
                INIT_HLIST_HEAD(&pp->owner);
+               pp->net = net;
                hlist_add_head(&pp->node, &head->chain);
        }
        return pp;
@@ -6142,7 +6165,8 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
 static inline void __sctp_put_port(struct sock *sk)
 {
        struct sctp_bind_hashbucket *head =
-               &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)];
+               &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+                                                 inet_sk(sk)->inet_num)];
        struct sctp_bind_bucket *pp;
 
        sctp_spin_lock(&head->lock);
@@ -6809,7 +6833,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
        newsp->hmac = NULL;
 
        /* Hook this new socket in to the bind_hash list. */
-       head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)];
+       head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+                                                inet_sk(oldsk)->inet_num)];
        sctp_local_bh_disable();
        sctp_spin_lock(&head->lock);
        pp = sctp_sk(oldsk)->bind_hash;
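
The bind-hash changes above make the port table namespace-aware in two steps: each sctp_bind_bucket records its struct net, and every lookup both hashes with the namespace and filters hits with net_eq(). The two-argument sctp_phashfn() itself is not shown in this hunk, so the sketch below is illustrative of the idea rather than the exact in-tree definition.

/* Illustrative namespace-aware port hash in the spirit of the
 * sctp_phashfn(net, port) calls above: net_hash_mix() folds a per-net
 * value into the index before masking to the table size, so the same
 * port number in two namespaces tends to land in different chains.
 */
static inline int example_phashfn(struct net *net, __u16 lport)
{
	return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
}
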
index 2b2bfe9..70e3ba5 100644 (file)
@@ -64,8 +64,34 @@ extern int sysctl_sctp_wmem[3];
 
 static ctl_table sctp_table[] = {
        {
+               .procname       = "sctp_mem",
+               .data           = &sysctl_sctp_mem,
+               .maxlen         = sizeof(sysctl_sctp_mem),
+               .mode           = 0644,
+               .proc_handler   = proc_doulongvec_minmax
+       },
+       {
+               .procname       = "sctp_rmem",
+               .data           = &sysctl_sctp_rmem,
+               .maxlen         = sizeof(sysctl_sctp_rmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sctp_wmem",
+               .data           = &sysctl_sctp_wmem,
+               .maxlen         = sizeof(sysctl_sctp_wmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+
+       { /* sentinel */ }
+};
+
+static ctl_table sctp_net_table[] = {
+       {
                .procname       = "rto_initial",
-               .data           = &sctp_rto_initial,
+               .data           = &init_net.sctp.rto_initial,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -74,7 +100,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rto_min",
-               .data           = &sctp_rto_min,
+               .data           = &init_net.sctp.rto_min,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -83,7 +109,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rto_max",
-               .data           = &sctp_rto_max,
+               .data           = &init_net.sctp.rto_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -91,17 +117,22 @@ static ctl_table sctp_table[] = {
                .extra2         = &timer_max
        },
        {
-               .procname       = "valid_cookie_life",
-               .data           = &sctp_valid_cookie_life,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &timer_max
+               .procname       = "rto_alpha_exp_divisor",
+               .data           = &init_net.sctp.rto_alpha,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "rto_beta_exp_divisor",
+               .data           = &init_net.sctp.rto_beta,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_burst",
-               .data           = &sctp_max_burst,
+               .data           = &init_net.sctp.max_burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -109,31 +140,42 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "association_max_retrans",
-               .data           = &sctp_max_retrans_association,
+               .procname       = "cookie_preserve_enable",
+               .data           = &init_net.sctp.cookie_preserve_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "valid_cookie_life",
+               .data           = &init_net.sctp.valid_cookie_life,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &int_max
+               .extra1         = &one,
+               .extra2         = &timer_max
        },
        {
-               .procname       = "sndbuf_policy",
-               .data           = &sctp_sndbuf_policy,
+               .procname       = "sack_timeout",
+               .data           = &init_net.sctp.sack_timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &sack_timer_min,
+               .extra2         = &sack_timer_max,
        },
        {
-               .procname       = "rcvbuf_policy",
-               .data           = &sctp_rcvbuf_policy,
-               .maxlen         = sizeof(int),
+               .procname       = "hb_interval",
+               .data           = &init_net.sctp.hb_interval,
+               .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &timer_max
        },
        {
-               .procname       = "path_max_retrans",
-               .data           = &sctp_max_retrans_path,
+               .procname       = "association_max_retrans",
+               .data           = &init_net.sctp.max_retrans_association,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -141,17 +183,17 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "pf_retrans",
-               .data           = &sctp_pf_retrans,
+               .procname       = "path_max_retrans",
+               .data           = &init_net.sctp.max_retrans_path,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &one,
                .extra2         = &int_max
        },
        {
                .procname       = "max_init_retransmits",
-               .data           = &sctp_max_retrans_init,
+               .data           = &init_net.sctp.max_retrans_init,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -159,103 +201,66 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "hb_interval",
-               .data           = &sctp_hb_interval,
-               .maxlen         = sizeof(unsigned int),
+               .procname       = "pf_retrans",
+               .data           = &init_net.sctp.pf_retrans,
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &timer_max
+               .extra1         = &zero,
+               .extra2         = &int_max
        },
        {
-               .procname       = "cookie_preserve_enable",
-               .data           = &sctp_cookie_preserve_enable,
+               .procname       = "sndbuf_policy",
+               .data           = &init_net.sctp.sndbuf_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "rto_alpha_exp_divisor",
-               .data           = &sctp_rto_alpha,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "rto_beta_exp_divisor",
-               .data           = &sctp_rto_beta,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "addip_enable",
-               .data           = &sctp_addip_enable,
+               .procname       = "rcvbuf_policy",
+               .data           = &init_net.sctp.rcvbuf_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "default_auto_asconf",
-               .data           = &sctp_default_auto_asconf,
+               .data           = &init_net.sctp.default_auto_asconf,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "prsctp_enable",
-               .data           = &sctp_prsctp_enable,
+               .procname       = "addip_enable",
+               .data           = &init_net.sctp.addip_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "sack_timeout",
-               .data           = &sctp_sack_timeout,
+               .procname       = "addip_noauth_enable",
+               .data           = &init_net.sctp.addip_noauth,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &sack_timer_min,
-               .extra2         = &sack_timer_max,
-       },
-       {
-               .procname       = "sctp_mem",
-               .data           = &sysctl_sctp_mem,
-               .maxlen         = sizeof(sysctl_sctp_mem),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax
-       },
-       {
-               .procname       = "sctp_rmem",
-               .data           = &sysctl_sctp_rmem,
-               .maxlen         = sizeof(sysctl_sctp_rmem),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "sctp_wmem",
-               .data           = &sysctl_sctp_wmem,
-               .maxlen         = sizeof(sysctl_sctp_wmem),
-               .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "auth_enable",
-               .data           = &sctp_auth_enable,
+               .procname       = "prsctp_enable",
+               .data           = &init_net.sctp.prsctp_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "addip_noauth_enable",
-               .data           = &sctp_addip_noauth,
+               .procname       = "auth_enable",
+               .data           = &init_net.sctp.auth_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "addr_scope_policy",
-               .data           = &sctp_scope_policy,
+               .data           = &init_net.sctp.scope_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -264,7 +269,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rwnd_update_shift",
-               .data           = &sctp_rwnd_upd_shift,
+               .data           = &init_net.sctp.rwnd_upd_shift,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
@@ -273,7 +278,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "max_autoclose",
-               .data           = &sctp_max_autoclose,
+               .data           = &init_net.sctp.max_autoclose,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = &proc_doulongvec_minmax,
@@ -284,6 +289,27 @@ static ctl_table sctp_table[] = {
        { /* sentinel */ }
 };
 
+int sctp_sysctl_net_register(struct net *net)
+{
+       struct ctl_table *table;
+       int i;
+
+       table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       for (i = 0; table[i].data; i++)
+               table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+
+       net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+       return 0;
+}
+
+void sctp_sysctl_net_unregister(struct net *net)
+{
+       unregister_net_sysctl_table(net->sctp.sysctl_header);
+}
+
 static struct ctl_table_header * sctp_sysctl_header;
 
 /* Sysctl registration.  */
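
sctp_sysctl_net_register() above works by duplicating the template table and rebasing every .data pointer by the byte offset between this namespace's net->sctp and init_net.sctp, so each namespace gets its own view under /proc/sys/net/sctp. A minimal sketch of how these helpers would typically be wired into pernet_operations follows; the hook names here are illustrative, the real ones live in the SCTP protocol init code.

/* Hypothetical pernet wiring: register the per-namespace sysctls after the
 * net->sctp defaults have been filled in, and drop them on namespace exit.
 */
static __net_init int example_sctp_net_init(struct net *net)
{
	/* ... populate net->sctp.* defaults here ... */
	return sctp_sysctl_net_register(net);
}

static __net_exit void example_sctp_net_exit(struct net *net)
{
	sctp_sysctl_net_unregister(net);
}

static struct pernet_operations example_sctp_net_ops = {
	.init = example_sctp_net_init,
	.exit = example_sctp_net_exit,
};

Calling register_pernet_subsys(&example_sctp_net_ops) at module init would then create the table for every existing and future network namespace.
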
index c97472b..953c21e 100644 (file)
@@ -59,7 +59,8 @@
 /* 1st Level Abstractions.  */
 
 /* Initialize a new transport from provided memory.  */
-static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
+static struct sctp_transport *sctp_transport_init(struct net *net,
+                                                 struct sctp_transport *peer,
                                                  const union sctp_addr *addr,
                                                  gfp_t gfp)
 {
@@ -76,7 +77,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
         * given destination transport address, set RTO to the protocol
         * parameter 'RTO.Initial'.
         */
-       peer->rto = msecs_to_jiffies(sctp_rto_initial);
+       peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
 
        peer->last_time_heard = jiffies;
        peer->last_time_ecne_reduced = jiffies;
@@ -86,8 +87,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
                            SPP_SACKDELAY_ENABLE;
 
        /* Initialize the default path max_retrans.  */
-       peer->pathmaxrxt  = sctp_max_retrans_path;
-       peer->pf_retrans  = sctp_pf_retrans;
+       peer->pathmaxrxt  = net->sctp.max_retrans_path;
+       peer->pf_retrans  = net->sctp.pf_retrans;
 
        INIT_LIST_HEAD(&peer->transmitted);
        INIT_LIST_HEAD(&peer->send_ready);
@@ -109,7 +110,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 }
 
 /* Allocate and initialize a new transport.  */
-struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
+struct sctp_transport *sctp_transport_new(struct net *net,
+                                         const union sctp_addr *addr,
                                          gfp_t gfp)
 {
        struct sctp_transport *transport;
@@ -118,7 +120,7 @@ struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
        if (!transport)
                goto fail;
 
-       if (!sctp_transport_init(transport, addr, gfp))
+       if (!sctp_transport_init(net, transport, addr, gfp))
                goto fail_init;
 
        transport->malloced = 1;
@@ -316,6 +318,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
        SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);
 
        if (tp->rttvar || tp->srtt) {
+               struct net *net = sock_net(tp->asoc->base.sk);
                /* 6.3.1 C3) When a new RTT measurement R' is made, set
                 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
                 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
@@ -327,10 +330,10 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
                 * For example, assuming the default value of RTO.Alpha of
                 * 1/8, rto_alpha would be expressed as 3.
                 */
-               tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
-                       + ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
-               tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
-                       + (rtt >> sctp_rto_alpha);
+               tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
+                       + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+               tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+                       + (rtt >> net->sctp.rto_alpha);
        } else {
                /* 6.3.1 C2) When the first RTT measurement R is made, set
                 * SRTT <- R, RTTVAR <- R/2.
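
The smoothing in the hunk above keeps RFC 4960 section 6.3.1 in integer arithmetic by storing alpha and beta as shift counts (the per-net rto_alpha and rto_beta, 3 and 2 by default, i.e. 1/8 and 1/4). The standalone worked example below is not kernel code; it only shows the arithmetic with made-up sample values.

/* Worked example of the shift-based smoothing above:
 * rto_alpha = 3 encodes alpha = 1/8, rto_beta = 2 encodes beta = 1/4.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int srtt = 800, rttvar = 200, rtt = 400;	/* sample jiffies */
	unsigned int rto_alpha = 3, rto_beta = 2;

	rttvar = rttvar - (rttvar >> rto_beta)
		+ ((unsigned int)abs((int)srtt - (int)rtt) >> rto_beta);
	srtt = srtt - (srtt >> rto_alpha) + (rtt >> rto_alpha);

	printf("srtt=%u rttvar=%u\n", srtt, rttvar);	/* prints srtt=750 rttvar=250 */
	return 0;
}
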
index f5a6a4f..360d869 100644 (file)
@@ -326,7 +326,9 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+       struct sk_buff_head *queue, struct sk_buff *f_frag,
+       struct sk_buff *l_frag)
 {
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
@@ -394,7 +396,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
        }
 
        event = sctp_skb2event(f_frag);
-       SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+       SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 
        return event;
 }
@@ -493,7 +495,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
-                       retval = sctp_make_reassembled_event(&ulpq->reasm,
+                       retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+                                                            &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
@@ -503,7 +506,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 done:
        return retval;
 found:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                            &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
@@ -563,7 +567,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                       &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;
 
@@ -655,7 +660,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                       &ulpq->reasm, first_frag, last_frag);
        return retval;
 }
 
index 09e7124..4ec5c80 100644 (file)
@@ -49,21 +49,6 @@ struct tipc_bearer tipc_bearers[MAX_BEARERS];
 static void bearer_disable(struct tipc_bearer *b_ptr);
 
 /**
- * media_name_valid - validate media name
- *
- * Returns 1 if media name is valid, otherwise 0.
- */
-static int media_name_valid(const char *name)
-{
-       u32 len;
-
-       len = strlen(name);
-       if ((len + 1) > TIPC_MAX_MEDIA_NAME)
-               return 0;
-       return strspn(name, tipc_alphabet) == len;
-}
-
-/**
  * tipc_media_find - locates specified media object by name
  */
 struct tipc_media *tipc_media_find(const char *name)
@@ -102,7 +87,7 @@ int tipc_register_media(struct tipc_media *m_ptr)
 
        write_lock_bh(&tipc_net_lock);
 
-       if (!media_name_valid(m_ptr->name))
+       if ((strlen(m_ptr->name) + 1) > TIPC_MAX_MEDIA_NAME)
                goto exit;
        if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
            !m_ptr->bcast_addr.broadcast)
@@ -206,9 +191,7 @@ static int bearer_name_validate(const char *name,
 
        /* validate component parts of bearer name */
        if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
-           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
-           (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
-           (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
                return 0;
 
        /* return bearer name components, if necessary */
index a056a38..f67866c 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2012, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -208,36 +208,6 @@ static struct sk_buff *cfg_set_remote_mng(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_max_publications(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value < 1 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max publications must be 1-65535)");
-       tipc_max_publications = value;
-       return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_subscriptions(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value < 1 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max subscriptions must be 1-65535");
-       tipc_max_subscriptions = value;
-       return tipc_cfg_reply_none();
-}
-
 static struct sk_buff *cfg_set_max_ports(void)
 {
        u32 value;
@@ -357,12 +327,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_MAX_PORTS:
                rep_tlv_buf = cfg_set_max_ports();
                break;
-       case TIPC_CMD_SET_MAX_PUBL:
-               rep_tlv_buf = cfg_set_max_publications();
-               break;
-       case TIPC_CMD_SET_MAX_SUBSCR:
-               rep_tlv_buf = cfg_set_max_subscriptions();
-               break;
        case TIPC_CMD_SET_NETID:
                rep_tlv_buf = cfg_set_netid();
                break;
@@ -372,12 +336,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_PORTS:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
                break;
-       case TIPC_CMD_GET_MAX_PUBL:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
-               break;
-       case TIPC_CMD_GET_MAX_SUBSCR:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
-               break;
        case TIPC_CMD_GET_NETID:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
                break;
@@ -393,6 +351,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_CLUSTERS:
        case TIPC_CMD_SET_MAX_NODES:
        case TIPC_CMD_GET_MAX_NODES:
+       case TIPC_CMD_SET_MAX_SUBSCR:
+       case TIPC_CMD_GET_MAX_SUBSCR:
+       case TIPC_CMD_SET_MAX_PUBL:
+       case TIPC_CMD_GET_MAX_PUBL:
        case TIPC_CMD_SET_LOG_SIZE:
        case TIPC_CMD_DUMP_LOG:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
index 6586eac..bfe8af8 100644 (file)
 
 
 /* global variables used by multiple sub-systems within TIPC */
-int tipc_random;
-
-const char tipc_alphabet[] =
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
+int tipc_random __read_mostly;
 
 /* configurable TIPC parameters */
-u32 tipc_own_addr;
-int tipc_max_ports;
-int tipc_max_subscriptions;
-int tipc_max_publications;
-int tipc_net_id;
-int tipc_remote_management;
+u32 tipc_own_addr __read_mostly;
+int tipc_max_ports __read_mostly;
+int tipc_net_id __read_mostly;
+int tipc_remote_management __read_mostly;
 
 
 /**
@@ -101,9 +96,8 @@ int tipc_core_start_net(unsigned long addr)
 {
        int res;
 
-       res = tipc_net_start(addr);
-       if (!res)
-               res = tipc_eth_media_start();
+       tipc_net_start(addr);
+       res = tipc_eth_media_start();
        if (res)
                tipc_core_stop_net();
        return res;
@@ -160,8 +154,6 @@ static int __init tipc_init(void)
 
        tipc_own_addr = 0;
        tipc_remote_management = 1;
-       tipc_max_publications = 10000;
-       tipc_max_subscriptions = 2000;
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
index fd42e10..0207db0 100644 (file)
@@ -60,7 +60,9 @@
 
 #define TIPC_MOD_VER "2.0.0"
 
-#define ULTRA_STRING_MAX_LEN 32768
+#define ULTRA_STRING_MAX_LEN   32768
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS  65535
 
 struct tipc_msg;       /* msg.h */
 
@@ -74,19 +76,15 @@ int tipc_snprintf(char *buf, int len, const char *fmt, ...);
 /*
  * Global configuration variables
  */
-extern u32 tipc_own_addr;
-extern int tipc_max_ports;
-extern int tipc_max_subscriptions;
-extern int tipc_max_publications;
-extern int tipc_net_id;
-extern int tipc_remote_management;
+extern u32 tipc_own_addr __read_mostly;
+extern int tipc_max_ports __read_mostly;
+extern int tipc_net_id __read_mostly;
+extern int tipc_remote_management __read_mostly;
 
 /*
  * Other global variables
  */
-extern int tipc_random;
-extern const char tipc_alphabet[];
-
+extern int tipc_random __read_mostly;
 
 /*
  * Routines available to privileged subsystems
index 90ac9bf..2132c1e 100644 (file)
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
 struct eth_bearer {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
+       struct work_struct setup;
        struct work_struct cleanup;
 };
 
 static struct tipc_media eth_media_info;
 static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
-static struct notifier_block notifier;
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt,
+                             void *dv);
+/*
+ * Network device notifier info
+ */
+static struct notifier_block notifier = {
+       .notifier_call  = recv_notification,
+       .priority       = 0
+};
 
 /**
  * eth_media_addr_set - initialize Ethernet media address structure
@@ -134,6 +145,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 }
 
 /**
+ * setup_bearer - setup association between Ethernet bearer and interface
+ */
+static void setup_bearer(struct work_struct *work)
+{
+       struct eth_bearer *eb_ptr =
+               container_of(work, struct eth_bearer, setup);
+
+       dev_add_pack(&eb_ptr->tipc_packet_type);
+}
+
+/**
  * enable_bearer - attach TIPC bearer to an Ethernet interface
  */
 static int enable_bearer(struct tipc_bearer *tb_ptr)
@@ -173,7 +195,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        eb_ptr->tipc_packet_type.func = recv_msg;
        eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
        INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-       dev_add_pack(&eb_ptr->tipc_packet_type);
+       INIT_WORK(&eb_ptr->setup, setup_bearer);
+       schedule_work(&eb_ptr->setup);
 
        /* Associate TIPC bearer with Ethernet bearer */
        eb_ptr->bearer = tb_ptr;
@@ -357,8 +380,6 @@ int tipc_eth_media_start(void)
        if (res)
                return res;
 
-       notifier.notifier_call = &recv_notification;
-       notifier.priority = 0;
        res = register_netdevice_notifier(&notifier);
        if (!res)
                eth_started = 1;
index 7a52d39..111ff83 100644 (file)
@@ -45,7 +45,7 @@ struct queue_item {
 static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled;
+static int handler_enabled __read_mostly;
 
 static void process_signal_queue(unsigned long dummy);
 
index 1c1e615..a79c755 100644 (file)
@@ -210,9 +210,7 @@ static int link_name_validate(const char *name,
            (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
            (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
            (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
-           (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
-           (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
                return 0;
 
        /* return link name components, if necessary */
index 360c478..98975e8 100644 (file)
@@ -41,7 +41,7 @@
 #include "subscr.h"
 #include "port.h"
 
-static int tipc_nametbl_size = 1024;           /* must be a power of 2 */
+#define TIPC_NAMETBL_SIZE 1024         /* must be a power of 2 */
 
 /**
  * struct name_info - name sequence publication info
@@ -114,7 +114,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
 
 static int hash(int x)
 {
-       return x & (tipc_nametbl_size - 1);
+       return x & (TIPC_NAMETBL_SIZE - 1);
 }
 
 /**
@@ -667,9 +667,9 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 {
        struct publication *publ;
 
-       if (table.local_publ_count >= tipc_max_publications) {
+       if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
-                       tipc_max_publications);
+                       TIPC_MAX_PUBLICATIONS);
                return NULL;
        }
 
@@ -871,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                ret += nametbl_header(buf, len, depth);
                lowbound = 0;
                upbound = ~0;
-               for (i = 0; i < tipc_nametbl_size; i++) {
+               for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                        seq_head = &table.types[i];
                        hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
@@ -935,7 +935,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
 
 int tipc_nametbl_init(void)
 {
-       table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
+       table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
                              GFP_ATOMIC);
        if (!table.types)
                return -ENOMEM;
@@ -953,7 +953,7 @@ void tipc_nametbl_stop(void)
 
        /* Verify name table is empty, then release it */
        write_lock_bh(&tipc_nametbl_lock);
-       for (i = 0; i < tipc_nametbl_size; i++) {
+       for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                if (hlist_empty(&table.types[i]))
                        continue;
                pr_err("nametbl_stop(): orphaned hash chain detected\n");
index 5b5cea2..7d305ec 100644 (file)
@@ -171,7 +171,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
        tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
-int tipc_net_start(u32 addr)
+void tipc_net_start(u32 addr)
 {
        char addr_string[16];
 
@@ -187,7 +187,6 @@ int tipc_net_start(u32 addr)
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
                tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
-       return 0;
 }
 
 void tipc_net_stop(void)
index 9eb4b9e..079daad 100644 (file)
@@ -41,7 +41,7 @@ extern rwlock_t tipc_net_lock;
 
 void tipc_net_route_msg(struct sk_buff *buf);
 
-int tipc_net_start(u32 addr);
+void tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
 #endif
index 5ed5965..0f7d0d0 100644 (file)
@@ -304,9 +304,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Refuse subscription if global limit exceeded */
-       if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+       if (atomic_read(&topsrv.subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
-                       tipc_max_subscriptions);
+                       TIPC_MAX_SUBSCRIPTIONS);
                subscr_terminate(subscriber);
                return NULL;
        }
index d355f67..2f876b9 100644 (file)
@@ -105,7 +105,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (!netif_running(wdev->netdev))
+       if (wdev->netdev && !netif_running(wdev->netdev))
                return;
 
        switch (wdev->iftype) {
@@ -143,6 +143,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
        case NL80211_IFTYPE_WDS:
                /* these interface types don't really have a channel */
                return;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (wdev->wiphy->features &
+                               NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
+                       *chanmode = CHAN_MODE_EXCLUSIVE;
+               return;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
                WARN_ON(1);
index dcd64d5..443d4d7 100644 (file)
@@ -230,9 +230,24 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
        rtnl_lock();
        mutex_lock(&rdev->devlist_mtx);
 
-       list_for_each_entry(wdev, &rdev->wdev_list, list)
-               if (wdev->netdev)
+       list_for_each_entry(wdev, &rdev->wdev_list, list) {
+               if (wdev->netdev) {
                        dev_close(wdev->netdev);
+                       continue;
+               }
+               /* otherwise, check iftype */
+               switch (wdev->iftype) {
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       if (!wdev->p2p_started)
+                               break;
+                       rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+                       wdev->p2p_started = false;
+                       rdev->opencount--;
+                       break;
+               default:
+                       break;
+               }
+       }
 
        mutex_unlock(&rdev->devlist_mtx);
        rtnl_unlock();
@@ -407,6 +422,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
                        if (WARN_ON(wiphy->software_iftypes & types))
                                return -EINVAL;
 
+                       /* Only a single P2P_DEVICE can be allowed */
+                       if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+                                   c->limits[j].max > 1))
+                               return -EINVAL;
+
                        cnt += c->limits[j].max;
                        /*
                         * Don't advertise an unsupported type
@@ -734,6 +754,35 @@ static void wdev_cleanup_work(struct work_struct *work)
        dev_put(wdev->netdev);
 }
 
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+       ASSERT_RTNL();
+
+       if (WARN_ON(wdev->netdev))
+               return;
+
+       mutex_lock(&rdev->devlist_mtx);
+       list_del_rcu(&wdev->list);
+       rdev->devlist_generation++;
+
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!wdev->p2p_started)
+                       break;
+               rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+               wdev->p2p_started = false;
+               rdev->opencount--;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+       mutex_unlock(&rdev->devlist_mtx);
+}
+EXPORT_SYMBOL(cfg80211_unregister_wdev);
+
 static struct device_type wiphy_type = {
        .name   = "wlan",
 };
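
cfg80211_unregister_wdev() above is the new teardown path for wdevs that have no netdev (currently the P2P Device type): it must run under RTNL, stops the device if it is still started, and unlinks the wdev from the RCU-protected per-rdev list. The driver-side sketch below is hypothetical; only cfg80211_unregister_wdev() itself comes from the hunk above, the surrounding ownership assumptions are illustrative.

/* Hypothetical driver teardown for a netdev-less P2P Device wdev that the
 * driver allocated itself.  Because the wdev was on an RCU list, a grace
 * period is needed before its memory can be reused.
 */
static void example_remove_p2p_device(struct wireless_dev *wdev)
{
	ASSERT_RTNL();

	cfg80211_unregister_wdev(wdev);
	synchronize_rcu();
	kfree(wdev);
}
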
index 1cdb1d5..8fd0242 100644 (file)
@@ -736,7 +736,6 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                          const u8 *buf, size_t len, bool no_cck,
                          bool dont_wait_for_ack, u64 *cookie)
 {
-       struct net_device *dev = wdev->netdev;
        const struct ieee80211_mgmt *mgmt;
        u16 stype;
 
@@ -796,7 +795,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_P2P_GO:
                case NL80211_IFTYPE_AP_VLAN:
-                       if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
+                       if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
                                err = -EINVAL;
                        break;
                case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +808,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                         * cfg80211 doesn't track the stations
                         */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       /*
+                        * fall through, P2P device only supports
+                        * public action frames
+                        */
                default:
                        err = -EOPNOTSUPP;
                        break;
@@ -819,7 +823,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                        return err;
        }
 
-       if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
+       if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
                return -EINVAL;
 
        /* Transmit the Action frame as requested by user space */
index 97026f3..787aeaa 100644
@@ -1100,6 +1100,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
                        goto nla_put_failure;
        }
+       CMD(start_p2p_device, START_P2P_DEVICE);
 
 #ifdef CONFIG_NL80211_TESTMODE
        CMD(testmode_cmd, TESTMODE);
@@ -1748,13 +1749,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
        if (dev &&
            (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
-            nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
-            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+            nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
                goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
            nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        rdev->devlist_generation ^
                        (cfg80211_rdev_list_generation << 2)))
@@ -2021,8 +2022,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                return PTR_ERR(wdev);
        }
 
-       if (type == NL80211_IFTYPE_MESH_POINT &&
-           info->attrs[NL80211_ATTR_MESH_ID]) {
+       switch (type) {
+       case NL80211_IFTYPE_MESH_POINT:
+               if (!info->attrs[NL80211_ATTR_MESH_ID])
+                       break;
                wdev_lock(wdev);
                BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
                             IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,6 +2034,26 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
                       wdev->mesh_id_up_len);
                wdev_unlock(wdev);
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               /*
+                * P2P Device doesn't have a netdev, so doesn't go
+                * through the netdev notifier and must be added here
+                */
+               mutex_init(&wdev->mtx);
+               INIT_LIST_HEAD(&wdev->event_list);
+               spin_lock_init(&wdev->event_lock);
+               INIT_LIST_HEAD(&wdev->mgmt_registrations);
+               spin_lock_init(&wdev->mgmt_registrations_lock);
+
+               mutex_lock(&rdev->devlist_mtx);
+               wdev->identifier = ++rdev->wdev_id;
+               list_add_rcu(&wdev->list, &rdev->wdev_list);
+               rdev->devlist_generation++;
+               mutex_unlock(&rdev->devlist_mtx);
+               break;
+       default:
+               break;
        }
 
        if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
@@ -6053,6 +6076,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6099,6 +6123,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6195,6 +6220,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6810,6 +6836,68 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
        return 0;
 }
 
+static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev = info->user_ptr[1];
+       int err;
+
+       if (!rdev->ops->start_p2p_device)
+               return -EOPNOTSUPP;
+
+       if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
+       if (wdev->p2p_started)
+               return 0;
+
+       mutex_lock(&rdev->devlist_mtx);
+       err = cfg80211_can_add_interface(rdev, wdev->iftype);
+       mutex_unlock(&rdev->devlist_mtx);
+       if (err)
+               return err;
+
+       err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+       if (err)
+               return err;
+
+       wdev->p2p_started = true;
+       mutex_lock(&rdev->devlist_mtx);
+       rdev->opencount++;
+       mutex_unlock(&rdev->devlist_mtx);
+
+       return 0;
+}
+
+static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev = info->user_ptr[1];
+
+       if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
+       if (!rdev->ops->stop_p2p_device)
+               return -EOPNOTSUPP;
+
+       if (!wdev->p2p_started)
+               return 0;
+
+       rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+       wdev->p2p_started = false;
+
+       mutex_lock(&rdev->devlist_mtx);
+       rdev->opencount--;
+       mutex_unlock(&rdev->devlist_mtx);
+
+       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
+               rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, true);
+       }
+
+       return 0;
+}
+
 #define NL80211_FLAG_NEED_WIPHY                0x01
 #define NL80211_FLAG_NEED_NETDEV       0x02
 #define NL80211_FLAG_NEED_RTNL         0x04
@@ -6817,7 +6905,7 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 #define NL80211_FLAG_NEED_NETDEV_UP    (NL80211_FLAG_NEED_NETDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_NEED_WDEV         0x10
-/* If a netdev is associated, it must be UP */
+/* If a netdev is associated, it must be UP, P2P must be started */
 #define NL80211_FLAG_NEED_WDEV_UP      (NL80211_FLAG_NEED_WDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 
@@ -6878,6 +6966,13 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                        }
 
                        dev_hold(dev);
+               } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
+                       if (!wdev->p2p_started) {
+                               mutex_unlock(&cfg80211_mutex);
+                               if (rtnl)
+                                       rtnl_unlock();
+                               return -ENETDOWN;
+                       }
                }
 
                cfg80211_lock_rdev(rdev);
@@ -7439,7 +7534,22 @@ static struct genl_ops nl80211_ops[] = {
                .internal_flags = NL80211_FLAG_NEED_NETDEV |
                                  NL80211_FLAG_NEED_RTNL,
        },
-
+       {
+               .cmd = NL80211_CMD_START_P2P_DEVICE,
+               .doit = nl80211_start_p2p_device,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_STOP_P2P_DEVICE,
+               .doit = nl80211_stop_p2p_device,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
index c4ad795..7d604c0 100644
@@ -41,6 +41,8 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
        [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
        [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
        [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+       [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
+       [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
        /*
         * add more here as they are defined in radiotap.h
         */
index 994e2f0..ef35f4e 100644
@@ -684,22 +684,10 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
 
 const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
 {
-       u8 *end, *pos;
-
-       pos = bss->information_elements;
-       if (pos == NULL)
+       if (bss->information_elements == NULL)
                return NULL;
-       end = pos + bss->len_information_elements;
-
-       while (pos + 1 < end) {
-               if (pos + 2 + pos[1] > end)
-                       break;
-               if (pos[0] == ie)
-                       return pos;
-               pos += 2 + pos[1];
-       }
-
-       return NULL;
+       return cfg80211_find_ie(ie, bss->information_elements,
+                                bss->len_information_elements);
 }
 EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
@@ -812,6 +800,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
        if (otype == NL80211_IFTYPE_AP_VLAN)
                return -EOPNOTSUPP;
 
+       /* cannot change into P2P device type */
+       if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
        if (!rdev->ops->change_virtual_intf ||
            !(rdev->wiphy.interface_modes & (1 << ntype)))
                return -EOPNOTSUPP;
@@ -889,6 +881,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                case NUM_NL80211_IFTYPES:
                        /* not happening */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       WARN_ON(1);
+                       break;
                }
        }
 
@@ -1053,8 +1048,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
                if (wdev_iter == wdev)
                        continue;
-               if (!netif_running(wdev_iter->netdev))
-                       continue;
+               if (wdev_iter->netdev) {
+                       if (!netif_running(wdev_iter->netdev))
+                               continue;
+               } else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+                       if (!wdev_iter->p2p_started)
+                               continue;
+               } else {
+                       WARN_ON(1);
+               }
 
                if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
                        continue;
index 5a2aa17..741a32a 100644
@@ -42,13 +42,12 @@ static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
 static struct dst_entry *xfrm_policy_sk_bundles;
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
-static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
-static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
+static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+                                               __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
@@ -95,6 +94,24 @@ bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl
        return false;
 }
 
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+{
+       struct xfrm_policy_afinfo *afinfo;
+
+       if (unlikely(family >= NPROTO))
+               return NULL;
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
+       if (unlikely(!afinfo))
+               rcu_read_unlock();
+       return afinfo;
+}
+
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
+{
+       rcu_read_unlock();
+}
+
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
@@ -2420,7 +2437,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
-       write_lock_bh(&xfrm_policy_afinfo_lock);
+       spin_lock(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
@@ -2441,9 +2458,9 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = xfrm_garbage_collect_deferred;
-               xfrm_policy_afinfo[afinfo->family] = afinfo;
+               rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
        }
-       write_unlock_bh(&xfrm_policy_afinfo_lock);
+       spin_unlock(&xfrm_policy_afinfo_lock);
 
        rtnl_lock();
        for_each_net(net) {
@@ -2476,21 +2493,26 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
-       write_lock_bh(&xfrm_policy_afinfo_lock);
+       spin_lock(&xfrm_policy_afinfo_lock);
        if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
-               else {
-                       struct dst_ops *dst_ops = afinfo->dst_ops;
-                       xfrm_policy_afinfo[afinfo->family] = NULL;
-                       dst_ops->kmem_cachep = NULL;
-                       dst_ops->check = NULL;
-                       dst_ops->negative_advice = NULL;
-                       dst_ops->link_failure = NULL;
-                       afinfo->garbage_collect = NULL;
-               }
+               else
+                       RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
+                                        NULL);
+       }
+       spin_unlock(&xfrm_policy_afinfo_lock);
+       if (!err) {
+               struct dst_ops *dst_ops = afinfo->dst_ops;
+
+               synchronize_rcu();
+
+               dst_ops->kmem_cachep = NULL;
+               dst_ops->check = NULL;
+               dst_ops->negative_advice = NULL;
+               dst_ops->link_failure = NULL;
+               afinfo->garbage_collect = NULL;
        }
-       write_unlock_bh(&xfrm_policy_afinfo_lock);
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
@@ -2499,33 +2521,16 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
 {
        struct xfrm_policy_afinfo *afinfo;
 
-       read_lock_bh(&xfrm_policy_afinfo_lock);
-       afinfo = xfrm_policy_afinfo[AF_INET];
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
 #if IS_ENABLED(CONFIG_IPV6)
-       afinfo = xfrm_policy_afinfo[AF_INET6];
+       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
 #endif
-       read_unlock_bh(&xfrm_policy_afinfo_lock);
-}
-
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
-{
-       struct xfrm_policy_afinfo *afinfo;
-       if (unlikely(family >= NPROTO))
-               return NULL;
-       read_lock(&xfrm_policy_afinfo_lock);
-       afinfo = xfrm_policy_afinfo[family];
-       if (unlikely(!afinfo))
-               read_unlock(&xfrm_policy_afinfo_lock);
-       return afinfo;
-}
-
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
-{
-       read_unlock(&xfrm_policy_afinfo_lock);
+       rcu_read_unlock();
 }
 
 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
index 87cd0e4..7856c33 100644
@@ -1700,7 +1700,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
 
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
-               acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
+               acqret = km->acquire(x, t, pol);
                if (!acqret)
                        err = acqret;
        }
index e75d8e4..ab58034 100644
@@ -2567,8 +2567,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
 }
 
 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
-                        struct xfrm_tmpl *xt, struct xfrm_policy *xp,
-                        int dir)
+                        struct xfrm_tmpl *xt, struct xfrm_policy *xp)
 {
        __u32 seq = xfrm_get_acqseq();
        struct xfrm_user_acquire *ua;
@@ -2583,7 +2582,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
        memcpy(&ua->id, &x->id, sizeof(ua->id));
        memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
        memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
-       copy_to_user_policy(xp, &ua->policy, dir);
+       copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
        ua->aalgos = xt->aalgos;
        ua->ealgos = xt->ealgos;
        ua->calgos = xt->calgos;
@@ -2605,7 +2604,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
 }
 
 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
-                            struct xfrm_policy *xp, int dir)
+                            struct xfrm_policy *xp)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
@@ -2614,7 +2613,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
        if (skb == NULL)
                return -ENOMEM;
 
-       if (build_acquire(skb, x, xt, xp, dir) < 0)
+       if (build_acquire(skb, x, xt, xp) < 0)
                BUG();
 
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);