Merge tag 'extcon-arizona-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/broon...
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 11 Feb 2013 21:44:43 +0000 (13:44 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 11 Feb 2013 21:44:43 +0000 (13:44 -0800)
Mark writes:
extcon: arizona: Updates for v3.9

More updates for v3.9, a mix of fixes for the code that's already there
and a few new features.

678 files changed:
Documentation/DocBook/uio-howto.tmpl
Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
Documentation/filesystems/f2fs.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/armada-xp-mv78230.dtsi
arch/arm/boot/dts/armada-xp-mv78260.dtsi
arch/arm/boot/dts/armada-xp-mv78460.dtsi
arch/arm/boot/dts/at91rm9200.dtsi
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/cros5250-common.dtsi
arch/arm/boot/dts/dove-cubox.dts
arch/arm/boot/dts/exynos5250-smdk5250.dts
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
arch/arm/boot/dts/kirkwood.dtsi
arch/arm/boot/dts/kizbox.dts
arch/arm/boot/dts/sunxi.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/configs/at91_dt_defconfig
arch/arm/kernel/debug.S
arch/arm/kernel/head.S
arch/arm/kernel/hyp-stub.S
arch/arm/mach-at91/setup.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx25.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx31.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/common.h
arch/arm/mach-imx/devices/devices-common.h
arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
arch/arm/mach-imx/devices/platform-imx-fb.c
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/iram_alloc.c
arch/arm/mach-imx/platsmp.c
arch/arm/mach-imx/pm-imx6q.c
arch/arm/mach-integrator/pci_v3.c
arch/arm/mach-kirkwood/board-ns2.c
arch/arm/mach-mvebu/Makefile
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/cclock2420_data.c
arch/arm/mach-omap2/cclock2430_data.c
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/drm.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-s3c64xx/mach-crag6410-module.c
arch/arm/mach-s3c64xx/pm.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c
arch/arm/plat-versatile/headsmp.S
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S
arch/arm64/boot/dts/Makefile
arch/arm64/include/asm/elf.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/ia64/kernel/ptrace.c
arch/m68k/include/asm/dma-mapping.h
arch/m68k/include/asm/pgtable_no.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/m68k/mm/init.c
arch/mn10300/Kconfig
arch/parisc/kernel/entry.S
arch/parisc/kernel/irq.c
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/signal.c
arch/parisc/math-emu/cnv_float.h
arch/powerpc/include/uapi/asm/kvm_para.h
arch/powerpc/kvm/book3s_hv_ras.c
arch/powerpc/kvm/emulate.c
arch/s390/Makefile
arch/s390/include/asm/dma.h
arch/s390/include/asm/io.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/timex.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/debug.c
arch/s390/kernel/irq.c
arch/s390/kernel/nmi.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/runtime_instr.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/oprofile/hwsampler.c
arch/s390/pci/pci.c
arch/s390/pci/pci_dma.c
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/include/asm/elf.h
arch/sh/include/asm/processor_32.h
arch/sh/include/asm/processor_64.h
arch/sh/include/uapi/asm/unistd_32.h
arch/sh/include/uapi/asm/unistd_64.h
arch/sh/kernel/syscalls_32.S
arch/sh/kernel/syscalls_64.S
arch/sh/lib/mcount.S
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/pci.c
arch/sparc/kernel/pci_psycho.c
arch/sparc/kernel/pci_sabre.c
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/x86/boot/compressed/eboot.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/kvm.c
arch/x86/kernel/setup.c
arch/x86/kernel/step.c
arch/x86/kvm/x86.c
arch/x86/xen/smp.c
drivers/Kconfig
drivers/Makefile
drivers/acpi/apei/apei-base.c
drivers/acpi/glue.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/ata/ahci.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/base/cpu.c
drivers/base/firmware_class.c
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap.c
drivers/block/virtio_blk.c
drivers/char/hw_random/exynos-rng.c
drivers/char/mem.c
drivers/char/pcmcia/synclink_cs.c
drivers/clk/mvebu/clk-cpu.c
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/driver.c
drivers/cpuidle/governors/menu.c
drivers/cpuidle/sysfs.c
drivers/devfreq/devfreq.c
drivers/devfreq/exynos4_bus.c
drivers/dma/imx-dma.c
drivers/dma/ioat/dma_v3.c
drivers/dma/tegra20-apb-dma.c
drivers/extcon/Kconfig
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-samsung.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/core/core/client.c
drivers/gpu/drm/nouveau/core/core/handle.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/include/core/client.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
drivers/gpu/drm/nouveau/core/subdev/vm/base.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv10_fence.c
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/reg_srcs/rv515
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/udl/udl_connector.c
drivers/hid/hid-hyperv.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/connection.c
drivers/hv/hv.c
drivers/hv/hv_balloon.c
drivers/hv/hv_util.c
drivers/hv/hyperv_vmbus.h
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/hwmon/vexpress.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-sirf.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/idle/intel_idle.c
drivers/iio/accel/Kconfig
drivers/iio/adc/ad7266.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/max1363.c
drivers/iio/common/hid-sensors/Kconfig
drivers/iio/common/hid-sensors/Makefile
drivers/iio/dac/ad5380.c
drivers/iio/dac/ad5446.c
drivers/iio/dac/ad5504.c
drivers/iio/dac/ad5624r_spi.c
drivers/iio/dac/ad5686.c
drivers/iio/dac/ad5791.c
drivers/iio/frequency/adf4350.c
drivers/iio/gyro/Kconfig
drivers/iio/light/Kconfig
drivers/iio/magnetometer/Kconfig
drivers/ipack/devices/ipoctal.c
drivers/media/i2c/m5mols/m5mols_core.c
drivers/media/platform/coda.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/platform/s5p-fimc/fimc-mdevice.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/usb/gspca/kinect.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/gspca/sonixj.c
drivers/media/usb/uvc/uvc_ctrl.c
drivers/media/usb/uvc/uvc_v4l2.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/mfd/Kconfig
drivers/mfd/vexpress-sysreg.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/atmel-ssc.c
drivers/misc/cb710/Kconfig
drivers/misc/lattice-ecp3-config.c [new file with mode: 0644]
drivers/misc/mei/Kconfig
drivers/misc/mei/Makefile
drivers/misc/mei/amthif.c
drivers/misc/mei/client.c [new file with mode: 0644]
drivers/misc/mei/client.h [new file with mode: 0644]
drivers/misc/mei/hbm.c [new file with mode: 0644]
drivers/misc/mei/hbm.h [new file with mode: 0644]
drivers/misc/mei/hw-me-regs.h [new file with mode: 0644]
drivers/misc/mei/hw-me.c [new file with mode: 0644]
drivers/misc/mei/hw-me.h [new file with mode: 0644]
drivers/misc/mei/hw.h
drivers/misc/mei/init.c
drivers/misc/mei/interface.c [deleted file]
drivers/misc/mei/interface.h [deleted file]
drivers/misc/mei/interrupt.c
drivers/misc/mei/iorw.c [deleted file]
drivers/misc/mei/main.c
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c [new file with mode: 0644]
drivers/misc/mei/wd.c
drivers/misc/ti-st/st_core.c
drivers/misc/ti-st/st_kim.c
drivers/misc/vmw_vmci/Kconfig [new file with mode: 0644]
drivers/misc/vmw_vmci/Makefile [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_context.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_context.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_datagram.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_datagram.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_doorbell.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_doorbell.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_driver.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_driver.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_event.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_event.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_guest.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_handle_array.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_handle_array.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_host.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_queue_pair.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_queue_pair.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_resource.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_resource.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_route.c [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_route.h [new file with mode: 0644]
drivers/mmc/host/Kconfig
drivers/mmc/host/mvsdio.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/xilinx/Kconfig
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ntb_netdev.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/Makefile
drivers/net/wireless/ath/wil6210/Kconfig [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/Makefile [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/cfg80211.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/dbg_hexdump.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/debugfs.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/interrupt.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/main.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/netdev.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/pcie_bus.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wil6210.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wmi.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wmi.h [new file with mode: 0644]
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/main.h
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
drivers/ntb/Kconfig [new file with mode: 0644]
drivers/ntb/Makefile [new file with mode: 0644]
drivers/ntb/ntb_hw.c [new file with mode: 0644]
drivers/ntb/ntb_hw.h [new file with mode: 0644]
drivers/ntb/ntb_regs.h [new file with mode: 0644]
drivers/ntb/ntb_transport.c [new file with mode: 0644]
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/shpchp.h
drivers/pci/hotplug/shpchp_core.c
drivers/pci/hotplug/shpchp_ctrl.c
drivers/pci/iov.c
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/aspm.c
drivers/pcmcia/i82092.c
drivers/pcmcia/vrc4171_card.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/regulator/core.c
drivers/regulator/max8997.c
drivers/regulator/max8998.c
drivers/regulator/s5m8767.c
drivers/rtc/rtc-da9055.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/char/con3215.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/vmur.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/device.c
drivers/s390/cio/device.h
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/qdio_thinint.c
drivers/s390/crypto/ap_bus.c
drivers/s390/kvm/kvm_virtio.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/lcs.c
drivers/scsi/storvsc_drv.c
drivers/sh/clk/cpg.c
drivers/staging/comedi/Kconfig
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/comedi_test.c
drivers/staging/comedi/drivers/ni_pcimio.c
drivers/staging/fwserial/Kconfig
drivers/staging/fwserial/TODO
drivers/staging/fwserial/fwserial.c
drivers/staging/fwserial/fwserial.h
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/gyro/Kconfig
drivers/staging/iio/gyro/adis16080_core.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/ipu-v3/ipu-common.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/staging/omapdrm/Makefile
drivers/staging/omapdrm/TODO
drivers/staging/omapdrm/omap_connector.c
drivers/staging/omapdrm/omap_crtc.c
drivers/staging/omapdrm/omap_drv.c
drivers/staging/omapdrm/omap_drv.h
drivers/staging/omapdrm/omap_encoder.c
drivers/staging/omapdrm/omap_gem_dmabuf.c
drivers/staging/omapdrm/omap_irq.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_plane.c
drivers/staging/rtl8187se/r8180_core.c
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/sb105x/Kconfig
drivers/staging/sb105x/sb_pci_mp.c
drivers/staging/speakup/synth.c
drivers/staging/tidspbridge/core/_tiomap.h
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/staging/tidspbridge/core/wdt.c
drivers/staging/vme/devices/vme_pio2_core.c
drivers/staging/vt6656/bssdb.h
drivers/staging/vt6656/int.h
drivers/staging/vt6656/iocmd.h
drivers/staging/vt6656/iowpa.h
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/wlan-ng/prism2mgmt.c
drivers/staging/zram/zram_drv.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/target_core_alua.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/ifx6x60.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/samsung.c
drivers/tty/serial/vt8500_serial.c
drivers/usb/Kconfig
drivers/usb/chipidea/host.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/fsl_mxc_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/mv_udc_core.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/tcm_usb_gadget.c
drivers/usb/gadget/u_serial.c
drivers/usb/host/Kconfig
drivers/usb/host/Makefile
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-mv.c
drivers/usb/host/ehci-mxc.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci.h
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/imx21-hcd.c
drivers/usb/host/ohci-tmio.c
drivers/usb/host/uhci-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/usbtest.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_dsps.c
drivers/usb/otg/Kconfig
drivers/usb/otg/mv_otg.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/mod_host.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/io_ti.c
drivers/usb/serial/option.c
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/video/imxfb.c
drivers/video/ssd1307fb.c
drivers/w1/masters/ds1wm.c
drivers/w1/masters/mxc_w1.c
drivers/w1/masters/w1-gpio.c
drivers/w1/slaves/w1_therm.c
drivers/xen/cpu_hotplug.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/privcmd.c
drivers/xen/xen-pciback/pciback.h
fs/Kconfig
fs/btrfs/extent-tree.c
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/buffer.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/connect.c
fs/debugfs/inode.c
fs/exec.c
fs/f2fs/acl.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fuse/Kconfig
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/file.c
fs/jbd/journal.c
fs/seq_file.c
fs/udf/super.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_trans_buf.c
include/asm-generic/dma-mapping-broken.h
include/asm-generic/pgtable.h
include/asm-generic/syscalls.h
include/drm/drm_mm.h
include/linux/ata.h
include/linux/audit.h
include/linux/compaction.h
include/linux/cpu_rmap.h
include/linux/cpuidle.h
include/linux/hyperv.h
include/linux/init.h
include/linux/interrupt.h
include/linux/libata.h
include/linux/lockdep.h
include/linux/mm.h
include/linux/module.h
include/linux/netdevice.h
include/linux/ntb.h [new file with mode: 0644]
include/linux/platform_data/imx-iram.h [moved from arch/arm/mach-imx/iram.h with 100% similarity]
include/linux/ptrace.h
include/linux/rbtree_augmented.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/vmw_vmci_api.h [new file with mode: 0644]
include/linux/vmw_vmci_defs.h [new file with mode: 0644]
include/sound/cs4271.h
include/sound/soc.h
include/target/target_core_base.h
include/uapi/linux/audit.h
include/uapi/linux/serial_core.h
init/Kconfig
init/do_mounts_initrd.c
init/main.c
kernel/async.c
kernel/audit.c
kernel/audit_tree.c
kernel/audit_watch.c
kernel/auditfilter.c
kernel/auditsc.c
kernel/compat.c
kernel/debug/kdb/kdb_main.c
kernel/fork.c
kernel/module.c
kernel/ptrace.c
kernel/rwsem.c
kernel/sched/core.c
kernel/signal.c
kernel/trace/ftrace.c
kernel/trace/trace.c
lib/bug.c
lib/cpu_rmap.c
lib/rbtree.c
mm/bootmem.c
mm/compaction.c
mm/huge_memory.c
mm/internal.h
mm/memblock.c
mm/migrate.c
mm/mmap.c
mm/page_alloc.c
net/core/dev.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv6/addrconf.c
net/iucv/iucv.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mlme.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/sunrpc/clnt.c
net/sunrpc/sched.c
net/sunrpc/xprt.c
net/wireless/core.c
security/device_cgroup.c
security/integrity/evm/evm_crypto.c
sound/arm/pxa2xx-ac97-lib.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdspm.c
sound/soc/codecs/arizona.c
sound/soc/codecs/arizona.h
sound/soc/codecs/cs4271.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/lm49453.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sta529.c
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm5100.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm_adsp.c
sound/soc/soc-core.c
sound/soc/soc-pcm.c
sound/usb/mixer_maps.c
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/hv/hv_kvp_daemon.c
tools/hv/hv_set_ifconfig.sh
tools/perf/MANIFEST
tools/perf/Makefile

index ddb05e9..9561815 100644 (file)
@@ -984,7 +984,7 @@ int main()
                return errno;
        }
        configfd = open(&quot;/sys/class/uio/uio0/device/config&quot;, O_RDWR);
-       if (uiofd &lt; 0) {
+       if (configfd &lt; 0) {
                perror(&quot;config open:&quot;);
                return errno;
        }
index 3a26812..bc50899 100644 (file)
@@ -81,7 +81,8 @@ PA31  TXD4
 Required properties for pin configuration node:
 - atmel,pins: 4 integers array, represents a group of pins mux and config
   setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>.
-  The PERIPH 0 means gpio.
+  The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B...
+  PIN_BANK 0 is pioA, PIN_BANK 1 is pioB...
 
 Bits used for CONFIG:
 PULL_UP                (1 << 0): indicate this pin need a pull up.
@@ -126,7 +127,7 @@ pinctrl@fffff400 {
                pinctrl_dbgu: dbgu-0 {
                        atmel,pins =
                                <1 14 0x1 0x0   /* PB14 periph A */
-                                1 15 0x1 0x1>; /* PB15 periph with pullup */
+                                1 15 0x1 0x1>; /* PB15 periph with pullup */
                };
        };
 };
index 8fbd8b4..dcf338e 100644 (file)
@@ -175,9 +175,9 @@ consists of multiple segments as described below.
                                             align with the zone size <-|
                  |-> align with the segment size
      _________________________________________________________________________
-    |            |            |    Node     |   Segment   |   Segment  |      |
-    | Superblock | Checkpoint |   Address   |    Info.    |   Summary  | Main |
-    |    (SB)    |   (CP)     | Table (NAT) | Table (SIT) | Area (SSA) |      |
+    |            |            |   Segment   |    Node     |   Segment  |      |
+    | Superblock | Checkpoint |    Info.    |   Address   |   Summary  | Main |
+    |    (SB)    |   (CP)     | Table (SIT) | Table (NAT) | Area (SSA) |      |
     |____________|_____2______|______N______|______N______|______N_____|__N___|
                                                                        .      .
                                                              .                .
@@ -200,14 +200,14 @@ consists of multiple segments as described below.
  : It contains file system information, bitmaps for valid NAT/SIT sets, orphan
    inode lists, and summary entries of current active segments.
 
-- Node Address Table (NAT)
- : It is composed of a block address table for all the node blocks stored in
-   Main area.
-
 - Segment Information Table (SIT)
  : It contains segment information such as valid block count and bitmap for the
    validity of all the blocks.
 
+- Node Address Table (NAT)
+ : It is composed of a block address table for all the node blocks stored in
+   Main area.
+
 - Segment Summary Area (SSA)
  : It contains summary entries which contains the owner information of all the
    data and node blocks stored in Main area.
@@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are
 valid, as shown as below.
 
   +--------+----------+---------+
-  |   CP   |    NAT   |   SIT   |
+  |   CP   |    SIT   |   NAT   |
   +--------+----------+---------+
   .         .          .          .
   .            .              .              .
   .               .                 .                 .
   +-------+-------+--------+--------+--------+--------+
-  | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 |
+  | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 |
   +-------+-------+--------+--------+--------+--------+
      |             ^                          ^
      |             |                          |
index 915564e..75fd3e0 100644 (file)
@@ -228,7 +228,7 @@ S:  Maintained
 F:     drivers/platform/x86/acerhdf.c
 
 ACER WMI LAPTOP EXTRAS
-M:     Joey Lee <jlee@novell.com>
+M:     "Lee, Chun-Yi" <jlee@suse.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/acer-wmi.c
@@ -648,7 +648,7 @@ F:  arch/arm/
 
 ARM SUB-ARCHITECTURES
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     MAINTAINED
+S:     Maintained
 F:     arch/arm/mach-*/
 F:     arch/arm/plat-*/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
@@ -1351,6 +1351,14 @@ W:       http://wireless.kernel.org/en/users/Drivers/ath9k
 S:     Supported
 F:     drivers/net/wireless/ath/ath9k/
 
+WILOCITY WIL6210 WIRELESS DRIVER
+M:     Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+L:     linux-wireless@vger.kernel.org
+L:     wil6210@qca.qualcomm.com
+S:     Supported
+W:     http://wireless.kernel.org/en/users/Drivers/wil6210
+F:     drivers/net/wireless/ath/wil6210/
+
 CARL9170 LINUX COMMUNITY WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@googlemail.com>
 L:     linux-wireless@vger.kernel.org
@@ -1964,9 +1972,9 @@ S:        Maintained
 F:     drivers/usb/host/ohci-ep93xx.c
 
 CIRRUS LOGIC CS4270 SOUND DRIVER
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Supported
+S:     Odd Fixes
 F:     sound/soc/codecs/cs4270*
 
 CLEANCACHE API
@@ -3183,9 +3191,9 @@ F:        include/uapi/video/
 F:     include/uapi/linux/fb.h
 
 FREESCALE DIU FRAMEBUFFER DRIVER
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     linux-fbdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/video/fsl-diu-fb.*
 
 FREESCALE DMA DRIVER
@@ -3220,9 +3228,8 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE QUICC ENGINE LIBRARY
-M:     Timur Tabi <timur@freescale.com>
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Supported
+S:     Orphan
 F:     arch/powerpc/sysdev/qe_lib/
 F:     arch/powerpc/include/asm/*qe.h
 
@@ -3241,16 +3248,16 @@ S:      Maintained
 F:     drivers/net/ethernet/freescale/ucc_geth*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Supported
+S:     Maintained
 F:     drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Supported
+S:     Maintained
 F:     sound/soc/fsl/fsl*
 F:     sound/soc/fsl/mpc8610_hpcd.c
 
@@ -5077,7 +5084,7 @@ S:        Maintained
 F:     drivers/media/radio/radio-mr800.c
 
 MSI LAPTOP SUPPORT
-M:     "Lee, Chun-Yi" <jlee@novell.com>
+M:     "Lee, Chun-Yi" <jlee@suse.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/msi-laptop.c
@@ -5394,6 +5401,13 @@ S:       Maintained
 F:     Documentation/scsi/NinjaSCSI.txt
 F:     drivers/scsi/nsp32*
 
+NTB DRIVER
+M:     Jon Mason <jon.mason@intel.com>
+S:     Supported
+F:     drivers/ntb/
+F:     drivers/net/ntb_netdev.c
+F:     include/linux/ntb.h
+
 NTFS FILESYSTEM
 M:     Anton Altaparmakov <anton@tuxera.com>
 L:     linux-ntfs-dev@lists.sourceforge.net
@@ -5507,8 +5521,7 @@ M:        Benoît Cousson <b-cousson@ti.com>
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
-F:     arch/arm/mach-omap2/omap_hwmod.c
-F:     arch/arm/plat-omap/include/plat/omap_hwmod.h
+F:     arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
 M:     Benoît Cousson <b-cousson@ti.com>
@@ -6579,7 +6592,7 @@ F:        drivers/media/platform/s3c-camif/
 F:     include/media/s3c_camif.h
 
 SERIAL DRIVERS
-M:     Alan Cox <alan@linux.intel.com>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial
@@ -7334,7 +7347,7 @@ S:        Odd Fixes
 F:     drivers/staging/speakup/
 
 STAGING - TI DSP BRIDGE DRIVERS
-M:     Omar Ramirez Luna <omar.ramirez@ti.com>
+M:     Omar Ramirez Luna <omar.ramirez@copitl.com>
 S:     Odd Fixes
 F:     drivers/staging/tidspbridge/
 
@@ -8526,7 +8539,7 @@ F:        Documentation/x86/
 F:     arch/x86/
 
 X86 PLATFORM DRIVERS
-M:     Matthew Garrett <mjg@redhat.com>
+M:     Matthew Garrett <matthew.garrett@nebula.com>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
 S:     Maintained
index a1667c4..2d3c92c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*
@@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                  -e s/arm.*/arm/ -e s/sa110/arm/ \
                                  -e s/s390x/s390/ -e s/parisc64/parisc/ \
                                  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-                                 -e s/sh[234].*/sh/ )
+                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
index e44da40..5ebb44f 100644 (file)
@@ -155,6 +155,7 @@ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
 dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
 
 targets += dtbs
+targets += $(dtb-y)
 endif
 
 # *.dtb used to be generated in the directory above. Clean out the
index 0004402..9b82fac 100644 (file)
@@ -26,7 +26,7 @@
 
        memory {
                device_type = "memory";
-               reg = <0x00000000 0x20000000>; /* 512 MB */
+               reg = <0x00000000 0x40000000>; /* 1 GB */
        };
 
        soc {
index 271855a..e041f42 100644 (file)
                };
 
                gpio0: gpio@d0018100 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018100 0x40>,
-                           <0xd0018800 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018100 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <16>, <17>, <18>, <19>;
+                       interrupts = <82>, <83>, <84>, <85>;
                };
 
                gpio1: gpio@d0018140 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018140 0x40>,
-                           <0xd0018840 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018140 0x40>;
                        ngpios = <17>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <20>, <21>, <22>;
+                       interrupts = <87>, <88>, <89>;
                };
        };
 };
index 1c1937d..9e23bd8 100644 (file)
                };
 
                gpio0: gpio@d0018100 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018100 0x40>,
-                           <0xd0018800 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018100 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <16>, <17>, <18>, <19>;
+                       interrupts = <82>, <83>, <84>, <85>;
                };
 
                gpio1: gpio@d0018140 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018140 0x40>,
-                           <0xd0018840 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018140 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <20>, <21>, <22>, <23>;
+                       interrupts = <87>, <88>, <89>, <90>;
                };
 
                gpio2: gpio@d0018180 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018180 0x40>,
-                           <0xd0018870 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018180 0x40>;
                        ngpios = <3>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <24>;
+                       interrupts = <91>;
                };
 
                ethernet@d0034000 {
index 4905cf3..9659661 100644 (file)
                };
 
                gpio0: gpio@d0018100 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018100 0x40>,
-                           <0xd0018800 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018100 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <16>, <17>, <18>, <19>;
+                       interrupts = <82>, <83>, <84>, <85>;
                };
 
                gpio1: gpio@d0018140 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018140 0x40>,
-                           <0xd0018840 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018140 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <20>, <21>, <22>, <23>;
+                       interrupts = <87>, <88>, <89>, <90>;
                };
 
                gpio2: gpio@d0018180 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018180 0x40>,
-                           <0xd0018870 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018180 0x40>;
                        ngpios = <3>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <24>;
+                       interrupts = <91>;
                };
 
                ethernet@d0034000 {
index e154f24..222047f 100644 (file)
 
        i2c@0 {
                compatible = "i2c-gpio";
-               gpios = <&pioA 23 0 /* sda */
-                        &pioA 24 0 /* scl */
+               gpios = <&pioA 25 0 /* sda */
+                        &pioA 26 0 /* scl */
                        >;
                i2c-gpio,sda-open-drain;
                i2c-gpio,scl-open-drain;
index 68bccf4..cb7bcc5 100644 (file)
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <1 16 0x1 0x0   /* PB16 periph A */
+                                                        1 17 0x1 0x0   /* PB17 periph A */
+                                                        1 18 0x1 0x0>; /* PB18 periph A */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <1 19 0x1 0x0   /* PB19 periph A */
+                                                        1 20 0x1 0x0   /* PB20 periph A */
+                                                        1 21 0x1 0x0>; /* PB21 periph A */
+                                       };
+                               };
+
                                pioA: gpio@fffff400 {
                                        compatible = "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
                                compatible = "atmel,at91rm9200-ssc";
                                reg = <0xfffbc000 0x4000>;
                                interrupts = <14 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
                                status = "disabled";
                        };
 
index 32ec62c..271d4de 100644 (file)
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <1 0 0x2 0x0    /* PB0 periph B */
+                                                        1 1 0x2 0x0    /* PB1 periph B */
+                                                        1 2 0x2 0x0>;  /* PB2 periph B */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <1 3 0x2 0x0    /* PB3 periph B */
+                                                        1 4 0x2 0x0    /* PB4 periph B */
+                                                        1 5 0x2 0x0>;  /* PB5 periph B */
+                                       };
+                               };
+
+                               ssc1 {
+                                       pinctrl_ssc1_tx: ssc1_tx-0 {
+                                               atmel,pins =
+                                                       <1 6 0x1 0x0    /* PB6 periph A */
+                                                        1 7 0x1 0x0    /* PB7 periph A */
+                                                        1 8 0x1 0x0>;  /* PB8 periph A */
+                                       };
+
+                                       pinctrl_ssc1_rx: ssc1_rx-0 {
+                                               atmel,pins =
+                                                       <1 9 0x1 0x0    /* PB9 periph A */
+                                                        1 10 0x1 0x0   /* PB10 periph A */
+                                                        1 11 0x1 0x0>; /* PB11 periph A */
+                                       };
+                               };
+
                                pioA: gpio@fffff200 {
                                        compatible = "atmel,at91rm9200-gpio";
                                        reg = <0xfffff200 0x200>;
                                compatible = "atmel,at91rm9200-ssc";
                                reg = <0xfff98000 0x4000>;
                                interrupts = <16 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
                                status = "disabled";
                        };
 
                                compatible = "atmel,at91rm9200-ssc";
                                reg = <0xfff9c000 0x4000>;
                                interrupts = <17 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
                                status = "disabled";
                        };
 
index 231858f..6b1d4ca 100644 (file)
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <3 0 0x1 0x0    /* PD0 periph A */
+                                                        3 1 0x1 0x0    /* PD1 periph A */
+                                                        3 2 0x1 0x0>;  /* PD2 periph A */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <3 3 0x1 0x0    /* PD3 periph A */
+                                                        3 4 0x1 0x0    /* PD4 periph A */
+                                                        3 5 0x1 0x0>;  /* PD5 periph A */
+                                       };
+                               };
+
+                               ssc1 {
+                                       pinctrl_ssc1_tx: ssc1_tx-0 {
+                                               atmel,pins =
+                                                       <3 10 0x1 0x0   /* PD10 periph A */
+                                                        3 11 0x1 0x0   /* PD11 periph A */
+                                                        3 12 0x1 0x0>; /* PD12 periph A */
+                                       };
+
+                                       pinctrl_ssc1_rx: ssc1_rx-0 {
+                                               atmel,pins =
+                                                       <3 13 0x1 0x0   /* PD13 periph A */
+                                                        3 14 0x1 0x0   /* PD14 periph A */
+                                                        3 15 0x1 0x0>; /* PD15 periph A */
+                                       };
+                               };
+
                                pioA: gpio@fffff200 {
                                        compatible = "atmel,at91rm9200-gpio";
                                        reg = <0xfffff200 0x200>;
                                compatible = "atmel,at91sam9g45-ssc";
                                reg = <0xfff9c000 0x4000>;
                                interrupts = <16 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
                                status = "disabled";
                        };
 
                                compatible = "atmel,at91sam9g45-ssc";
                                reg = <0xfffa0000 0x4000>;
                                interrupts = <17 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
                                status = "disabled";
                        };
 
index e9efb34..80e29c6 100644 (file)
@@ -28,6 +28,7 @@
                tcb1 = &tcb1;
                i2c0 = &i2c0;
                i2c1 = &i2c1;
+               ssc0 = &ssc0;
        };
        cpus {
                cpu@0 {
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <0 24 0x2 0x0   /* PA24 periph B */
+                                                        0 25 0x2 0x0   /* PA25 periph B */
+                                                        0 26 0x2 0x0>; /* PA26 periph B */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <0 27 0x2 0x0   /* PA27 periph B */
+                                                        0 28 0x2 0x0   /* PA28 periph B */
+                                                        0 29 0x2 0x0>; /* PA29 periph B */
+                                       };
+                               };
+
                                pioA: gpio@fffff400 {
                                        compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
                                status = "disabled";
                        };
 
+                       ssc0: ssc@f0010000 {
+                               compatible = "atmel,at91sam9g45-ssc";
+                               reg = <0xf0010000 0x4000>;
+                               interrupts = <28 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+                               status = "disabled";
+                       };
+
                        usart0: serial@f801c000 {
                                compatible = "atmel,at91sam9260-usart";
                                reg = <0xf801c000 0x4000>;
index 40ac3a4..8ecca69 100644 (file)
                                interrupts = <1 4 7>;
                        };
 
-                       ssc0: ssc@f0010000 {
-                               compatible = "atmel,at91sam9g45-ssc";
-                               reg = <0xf0010000 0x4000>;
-                               interrupts = <28 4 5>;
-                               status = "disabled";
-                       };
-
                        tcb0: timer@f8008000 {
                                compatible = "atmel,at91sam9x5-tcb";
                                reg = <0xf8008000 0x100>;
                                                atmel,pins =
                                                        <0 3 0x1 0x0>;  /* PA3 periph A */
                                        };
+
+                                       pinctrl_usart0_sck: usart0_sck-0 {
+                                               atmel,pins =
+                                                       <0 4 0x1 0x0>;  /* PA4 periph A */
+                                       };
                                };
 
                                usart1 {
 
                                        pinctrl_usart1_rts: usart1_rts-0 {
                                                atmel,pins =
-                                                       <3 27 0x3 0x0>; /* PC27 periph C */
+                                                       <2 27 0x3 0x0>; /* PC27 periph C */
                                        };
 
                                        pinctrl_usart1_cts: usart1_cts-0 {
                                                atmel,pins =
-                                                       <3 28 0x3 0x0>; /* PC28 periph C */
+                                                       <2 28 0x3 0x0>; /* PC28 periph C */
+                                       };
+
+                                       pinctrl_usart1_sck: usart1_sck-0 {
+                                               atmel,pins =
+                                                       <2 28 0x3 0x0>; /* PC29 periph C */
                                        };
                                };
 
 
                                        pinctrl_uart2_rts: uart2_rts-0 {
                                                atmel,pins =
-                                                       <0 0 0x2 0x0>;  /* PB0 periph B */
+                                                       <1 0 0x2 0x0>;  /* PB0 periph B */
                                        };
 
                                        pinctrl_uart2_cts: uart2_cts-0 {
                                                atmel,pins =
-                                                       <0 1 0x2 0x0>;  /* PB1 periph B */
+                                                       <1 1 0x2 0x0>;  /* PB1 periph B */
+                                       };
+
+                                       pinctrl_usart2_sck: usart2_sck-0 {
+                                               atmel,pins =
+                                                       <1 2 0x2 0x0>;  /* PB2 periph B */
                                        };
                                };
 
                                usart3 {
                                        pinctrl_uart3: usart3-0 {
                                                atmel,pins =
-                                                       <3 23 0x2 0x1   /* PC22 periph B with pullup */
-                                                        3 23 0x2 0x0>; /* PC23 periph B */
+                                                       <2 23 0x2 0x1   /* PC22 periph B with pullup */
+                                                        2 23 0x2 0x0>; /* PC23 periph B */
                                        };
 
                                        pinctrl_usart3_rts: usart3_rts-0 {
                                                atmel,pins =
-                                                       <3 24 0x2 0x0>; /* PC24 periph B */
+                                                       <2 24 0x2 0x0>; /* PC24 periph B */
                                        };
 
                                        pinctrl_usart3_cts: usart3_cts-0 {
                                                atmel,pins =
-                                                       <3 25 0x2 0x0>; /* PC25 periph B */
+                                                       <2 25 0x2 0x0>; /* PC25 periph B */
+                                       };
+
+                                       pinctrl_usart3_sck: usart3_sck-0 {
+                                               atmel,pins =
+                                                       <2 26 0x2 0x0>; /* PC26 periph B */
                                        };
                                };
 
                                uart0 {
                                        pinctrl_uart0: uart0-0 {
                                                atmel,pins =
-                                                       <3 8 0x3 0x0    /* PC8 periph C */
-                                                        3 9 0x3 0x1>;  /* PC9 periph C with pullup */
+                                                       <2 8 0x3 0x0    /* PC8 periph C */
+                                                        2 9 0x3 0x1>;  /* PC9 periph C with pullup */
                                        };
                                };
 
                                uart1 {
                                        pinctrl_uart1: uart1-0 {
                                                atmel,pins =
-                                                       <3 16 0x3 0x0   /* PC16 periph C */
-                                                        3 17 0x3 0x1>; /* PC17 periph C with pullup */
+                                                       <2 16 0x3 0x0   /* PC16 periph C */
+                                                        2 17 0x3 0x1>; /* PC17 periph C with pullup */
                                        };
                                };
 
 
                                        pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 {
                                                atmel,pins =
-                                                       <1 8 0x1 0x0    /* PA8 periph A */
-                                                        1 11 0x1 0x0   /* PA11 periph A */
-                                                        1 12 0x1 0x0   /* PA12 periph A */
-                                                        1 13 0x1 0x0   /* PA13 periph A */
-                                                        1 14 0x1 0x0   /* PA14 periph A */
-                                                        1 15 0x1 0x0   /* PA15 periph A */
-                                                        1 16 0x1 0x0   /* PA16 periph A */
-                                                        1 17 0x1 0x0>; /* PA17 periph A */
+                                                       <1 8 0x1 0x0    /* PB8 periph A */
+                                                        1 11 0x1 0x0   /* PB11 periph A */
+                                                        1 12 0x1 0x0   /* PB12 periph A */
+                                                        1 13 0x1 0x0   /* PB13 periph A */
+                                                        1 14 0x1 0x0   /* PB14 periph A */
+                                                        1 15 0x1 0x0   /* PB15 periph A */
+                                                        1 16 0x1 0x0   /* PB16 periph A */
+                                                        1 17 0x1 0x0>; /* PB17 periph A */
                                        };
                                };
 
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <0 24 0x2 0x0   /* PA24 periph B */
+                                                        0 25 0x2 0x0   /* PA25 periph B */
+                                                        0 26 0x2 0x0>; /* PA26 periph B */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <0 27 0x2 0x0   /* PA27 periph B */
+                                                        0 28 0x2 0x0   /* PA28 periph B */
+                                                        0 29 0x2 0x0>; /* PA29 periph B */
+                                       };
+                               };
+
                                pioA: gpio@fffff400 {
                                        compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
                                };
                        };
 
+                       ssc0: ssc@f0010000 {
+                               compatible = "atmel,at91sam9g45-ssc";
+                               reg = <0xf0010000 0x4000>;
+                               interrupts = <28 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+                               status = "disabled";
+                       };
+
                        mmc0: mmc@f0008000 {
                                compatible = "atmel,hsmci";
                                reg = <0xf0008000 0x600>;
index fddd174..46c0980 100644 (file)
@@ -96,8 +96,8 @@
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
index fed7d3f..cdee96f 100644 (file)
 };
 
 &uart0 { status = "okay"; };
-&sdio0 { status = "okay"; };
 &sata0 { status = "okay"; };
 &i2c0 { status = "okay"; };
 
+&sdio0 {
+       status = "okay";
+       /* sdio0 card detect is connected to wrong pin on CuBox */
+       cd-gpios = <&gpio0 12 1>;
+};
+
 &spi0 {
        status = "okay";
 
 };
 
 &pinctrl {
-       pinctrl-0 = <&pmx_gpio_18>;
+       pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>;
        pinctrl-names = "default";
 
+       pmx_gpio_12: pmx-gpio-12 {
+               marvell,pins = "mpp12";
+               marvell,function = "gpio";
+       };
+
        pmx_gpio_18: pmx-gpio-18 {
                marvell,pins = "mpp18";
                marvell,function = "gpio";
index 942d576..e05b18f 100644 (file)
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
index 9bc6785..77d21ab 100644 (file)
@@ -1,4 +1,5 @@
 /include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
 
 / {
        chosen {
@@ -6,6 +7,21 @@
        };
 
        ocp@f1000000 {
+               pinctrl: pinctrl@10000 {
+                       pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0
+                                       &pmx_ns2_sata0 &pmx_ns2_sata1>;
+                       pinctrl-names = "default";
+
+                       pmx_ns2_sata0: pmx-ns2-sata0 {
+                               marvell,pins = "mpp21";
+                               marvell,function = "sata0";
+                       };
+                       pmx_ns2_sata1: pmx-ns2-sata1 {
+                               marvell,pins = "mpp20";
+                               marvell,function = "sata1";
+                       };
+               };
+
                serial@12000 {
                        clock-frequency = <166666667>;
                        status = "okay";
index 110d6cb..d6ab442 100644 (file)
@@ -36,6 +36,7 @@
                        reg = <0x10100 0x40>;
                        ngpios = <32>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <35>, <36>, <37>, <38>;
                };
 
@@ -46,6 +47,7 @@
                        reg = <0x10140 0x40>;
                        ngpios = <18>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <39>, <40>, <41>;
                };
 
index e8814fe..b4dc3ed 100644 (file)
@@ -48,6 +48,8 @@
 
                        macb0: ethernet@fffc4000 {
                                phy-mode = "mii";
+                               pinctrl-0 = <&pinctrl_macb_rmii
+                                            &pinctrl_macb_rmii_mii_alt>;
                                status = "okay";
                        };
 
index 8bbc2bf..8b36abe 100644 (file)
                };
 
                uart0: uart@01c28000 {
-                       compatible = "ns8250";
+                       compatible = "snps,dw-apb-uart";
                        reg = <0x01c28000 0x400>;
                        interrupts = <1>;
                        reg-shift = <2>;
+                       reg-io-width = <4>;
                        clock-frequency = <24000000>;
                        status = "disabled";
                };
 
                uart1: uart@01c28400 {
-                       compatible = "ns8250";
+                       compatible = "snps,dw-apb-uart";
                        reg = <0x01c28400 0x400>;
                        interrupts = <2>;
                        reg-shift = <2>;
+                       reg-io-width = <4>;
                        clock-frequency = <24000000>;
                        status = "disabled";
                };
index 1fc405a..cf8071a 100644 (file)
@@ -45,7 +45,6 @@
                        reg = <1>;
                };
 
-/* A7s disabled till big.LITTLE patches are available...
                cpu2: cpu@2 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a7";
@@ -63,7 +62,6 @@
                        compatible = "arm,cortex-a7";
                        reg = <0x102>;
                };
-*/
        };
 
        memory@80000000 {
index b175577..1ea9590 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_SOC_AT91SAM9260=y
 CONFIG_SOC_AT91SAM9263=y
 CONFIG_SOC_AT91SAM9G45=y
 CONFIG_SOC_AT91SAM9X5=y
+CONFIG_SOC_AT91SAM9N12=y
 CONFIG_MACH_AT91SAM_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 CONFIG_AT91_TIMER_HZ=128
@@ -31,7 +32,7 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
-CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
+CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
 CONFIG_KEXEC=y
 CONFIG_AUTO_ZRELADDR=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
index 6809200..14f7c3b 100644 (file)
@@ -100,12 +100,14 @@ ENTRY(printch)
                b       1b
 ENDPROC(printch)
 
+#ifdef CONFIG_MMU
 ENTRY(debug_ll_addr)
                addruart r2, r3, ip
                str     r2, [r0]
                str     r3, [r1]
                mov     pc, lr
 ENDPROC(debug_ll_addr)
+#endif
 
 #else
 
index 4eee351..486a15a 100644 (file)
@@ -246,6 +246,7 @@ __create_page_tables:
 
        /*
         * Then map boot params address in r2 if specified.
+        * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
         */
        mov     r0, r2, lsr #SECTION_SHIFT
        movs    r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
        addne   r3, r3, #PAGE_OFFSET
        addne   r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
        orrne   r6, r7, r0
+       strne   r6, [r3], #1 << PMD_ORDER
+       addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
         * as it has already been validated by the primary processor.
         */
 #ifdef CONFIG_ARM_VIRT_EXT
-       bl      __hyp_stub_install
+       bl      __hyp_stub_install_secondary
 #endif
        safe_svcmode_maskall r9
 
index 65b2417..1315c4c 100644 (file)
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
         * immediately.
         */
        compare_cpu_mode_with_primary   r4, r5, r6, r7
-       bxne    lr
+       movne   pc, lr
 
        /*
         * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
         */
 
        cmp     r4, #HYP_MODE
-       bxne    lr                      @ give up if the CPU is not in HYP mode
+       movne   pc, lr                  @ give up if the CPU is not in HYP mode
 
 /*
  * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
  * Eventually, CPU-specific code might be needed -- assume not for now
  *
  * This code relies on the "eret" instruction to synchronize the
- * various coprocessor accesses.
+ * various coprocessor accesses. This is done when we switch to SVC
+ * (see safe_svcmode_maskall).
  */
        @ Now install the hypervisor stub:
        adr     r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(     orr     r7, #(1 << 30)  )       @ HSCTLR.TE
 1:
 #endif
 
-       bic     r7, r4, #MODE_MASK
-       orr     r7, r7, #SVC_MODE
-THUMB( orr     r7, r7, #PSR_T_BIT      )
-       msr     spsr_cxsf, r7           @ This is SPSR_hyp.
-
-       __MSR_ELR_HYP(14)               @ msr elr_hyp, lr
-       __ERET                          @ return, switching to SVC mode
-                                       @ The boot CPU mode is left in r4.
+       bx      lr                      @ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
        @ fall through
 ENTRY(__hyp_set_vectors)
        __HVC(0)
-       bx      lr
+       mov     pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
index 9ee866c..4b67847 100644 (file)
@@ -105,6 +105,8 @@ static void __init soc_detect(u32 dbgu_base)
        switch (socid) {
        case ARCH_ID_AT91RM9200:
                at91_soc_initdata.type = AT91_SOC_RM9200;
+               if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE)
+                       at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
                at91_boot_soc = at91rm9200_soc;
                break;
 
index 3e628fd..0a2349d 100644 (file)
@@ -851,6 +851,7 @@ config SOC_IMX6Q
        select HAVE_CAN_FLEXCAN if CAN
        select HAVE_IMX_GPC
        select HAVE_IMX_MMDC
+       select HAVE_IMX_SRC
        select HAVE_SMP
        select MFD_SYSCON
        select PINCTRL
index b197aa7..2c570cd 100644 (file)
@@ -254,9 +254,9 @@ int __init mx25_clocks_init(void)
        clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
        clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
        clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
-       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
+       clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
        clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0");
        /* i.mx25 has the i.mx35 type cspi */
        clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
index 4c1d1e4..1ffe3b5 100644 (file)
@@ -236,9 +236,9 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0");
        clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0");
        clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0");
-       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "imx-udc-mx27");
        clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
        clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
        clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
index 8be64e0..16ccbd4 100644 (file)
@@ -139,9 +139,9 @@ int __init mx31_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
        clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
        clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
-       clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
-       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
        clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
        /* i.mx31 has the i.mx21 type uart */
        clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
index 66f3d65..f0727e8 100644 (file)
@@ -251,9 +251,9 @@ int __init mx35_clocks_init()
        clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
        clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
        clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
-       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
+       clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27");
        clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
        clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
        clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
index 579023f..fb7cb84 100644 (file)
@@ -269,9 +269,9 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
        clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
        clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
-       clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51");
+       clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51");
+       clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51");
        clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand");
        clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
        clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
index 7f2c10c..c0c4e72 100644 (file)
@@ -436,6 +436,9 @@ int __init mx6q_clocks_init(void)
        for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
                clk_prepare_enable(clk[clks_init_on[i]]);
 
+       /* Set initial power mode */
+       imx6q_set_lpm(WAIT_CLOCKED);
+
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
        base = of_iomap(np, 0);
        WARN_ON(!base);
index 7191ab4..fa36fb8 100644 (file)
@@ -142,6 +142,7 @@ extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
 extern void imx6q_clock_map_io(void);
 
 extern void imx_cpu_die(unsigned int cpu);
+extern int imx_cpu_kill(unsigned int cpu);
 
 #ifdef CONFIG_PM
 extern void imx6q_pm_init(void);
index 6277baf..9bd5777 100644 (file)
@@ -63,6 +63,7 @@ struct platform_device *__init imx_add_flexcan(
 
 #include <linux/fsl_devices.h>
 struct imx_fsl_usb2_udc_data {
+       const char *devid;
        resource_size_t iobase;
        resource_size_t irq;
 };
index 37e4439..3c06bd9 100644 (file)
 #include "../hardware.h"
 #include "devices-common.h"
 
-#define imx_fsl_usb2_udc_data_entry_single(soc)                                \
+#define imx_fsl_usb2_udc_data_entry_single(soc, _devid)                        \
        {                                                               \
+               .devid = _devid,                                        \
                .iobase = soc ## _USB_OTG_BASE_ADDR,                    \
                .irq = soc ## _INT_USB_OTG,                             \
        }
 
 #ifdef CONFIG_SOC_IMX25
 const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX25);
+       imx_fsl_usb2_udc_data_entry_single(MX25, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX25 */
 
 #ifdef CONFIG_SOC_IMX27
 const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX27);
+       imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX27 */
 
 #ifdef CONFIG_SOC_IMX31
 const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX31);
+       imx_fsl_usb2_udc_data_entry_single(MX31, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX31 */
 
 #ifdef CONFIG_SOC_IMX35
 const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX35);
+       imx_fsl_usb2_udc_data_entry_single(MX35, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX35 */
 
 #ifdef CONFIG_SOC_IMX51
 const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX51);
+       imx_fsl_usb2_udc_data_entry_single(MX51, "imx-udc-mx51");
 #endif
 
 struct platform_device *__init imx_add_fsl_usb2_udc(
@@ -57,7 +58,7 @@ struct platform_device *__init imx_add_fsl_usb2_udc(
                        .flags = IORESOURCE_IRQ,
                },
        };
-       return imx_add_platform_device_dmamask("fsl-usb2-udc", -1,
+       return imx_add_platform_device_dmamask(data->devid, -1,
                        res, ARRAY_SIZE(res),
                        pdata, sizeof(*pdata), DMA_BIT_MASK(32));
 }
index 10b0ed3..25a47c6 100644 (file)
@@ -54,7 +54,7 @@ struct platform_device *__init imx_add_imx_fb(
                        .flags = IORESOURCE_IRQ,
                },
        };
-       return imx_add_platform_device_dmamask("imx-fb", 0,
+       return imx_add_platform_device_dmamask(data->devid, 0,
                        res, ARRAY_SIZE(res),
                        pdata, sizeof(*pdata), DMA_BIT_MASK(32));
 }
index 3dec962..7bc5fe1 100644 (file)
@@ -46,9 +46,11 @@ static inline void cpu_enter_lowpower(void)
 void imx_cpu_die(unsigned int cpu)
 {
        cpu_enter_lowpower();
-       imx_enable_cpu(cpu, false);
+       cpu_do_idle();
+}
 
-       /* spin here until hardware takes it down */
-       while (1)
-               ;
+int imx_cpu_kill(unsigned int cpu)
+{
+       imx_enable_cpu(cpu, false);
+       return 1;
 }
index 6c80424..e05cf40 100644 (file)
@@ -22,8 +22,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/genalloc.h>
-
-#include "iram.h"
+#include "linux/platform_data/imx-iram.h"
 
 static unsigned long iram_phys_base;
 static void __iomem *iram_virt_base;
index 3777b80..66fae88 100644 (file)
@@ -92,5 +92,6 @@ struct smp_operations  imx_smp_ops __initdata = {
        .smp_boot_secondary     = imx_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = imx_cpu_die,
+       .cpu_kill               = imx_cpu_kill,
 #endif
 };
index a17543d..ee42d20 100644 (file)
@@ -41,6 +41,7 @@ static int imx6q_pm_enter(suspend_state_t state)
                cpu_suspend(0, imx6q_suspend_finish);
                imx_smp_prepare();
                imx_gpc_post_resume();
+               imx6q_set_lpm(WAIT_CLOCKED);
                break;
        default:
                return -EINVAL;
index be50e79..e7fcea7 100644 (file)
@@ -475,13 +475,12 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
 {
        int ret = 0;
 
+       if (!ap_syscon_base)
+               return -EINVAL;
+
        if (nr == 0) {
                sys->mem_offset = PHYS_PCI_MEM_BASE;
                ret = pci_v3_setup_resources(sys);
-               /* Remap the Integrator system controller */
-               ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
-               if (!ap_syscon_base)
-                       return -EINVAL;
        }
 
        return ret;
@@ -497,6 +496,13 @@ void __init pci_v3_preinit(void)
        unsigned int temp;
        int ret;
 
+       /* Remap the Integrator system controller */
+       ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
+       if (!ap_syscon_base) {
+               pr_err("unable to remap the AP syscon for PCIv3\n");
+               return;
+       }
+
        pcibios_min_mem = 0x00100000;
 
        /*
index 8821720..f4632a8 100644 (file)
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include "common.h"
-#include "mpp.h"
 
 static struct mv643xx_eth_platform_data ns2_ge00_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(8),
 };
 
-static unsigned int ns2_mpp_config[] __initdata = {
-       MPP0_SPI_SCn,
-       MPP1_SPI_MOSI,
-       MPP2_SPI_SCK,
-       MPP3_SPI_MISO,
-       MPP4_NF_IO6,
-       MPP5_NF_IO7,
-       MPP6_SYSRST_OUTn,
-       MPP7_GPO,               /* Fan speed (bit 1) */
-       MPP8_TW0_SDA,
-       MPP9_TW0_SCK,
-       MPP10_UART0_TXD,
-       MPP11_UART0_RXD,
-       MPP12_GPO,              /* Red led */
-       MPP14_GPIO,             /* USB fuse */
-       MPP16_GPIO,             /* SATA 0 power */
-       MPP17_GPIO,             /* SATA 1 power */
-       MPP18_NF_IO0,
-       MPP19_NF_IO1,
-       MPP20_SATA1_ACTn,
-       MPP21_SATA0_ACTn,
-       MPP22_GPIO,             /* Fan speed (bit 0) */
-       MPP23_GPIO,             /* Fan power */
-       MPP24_GPIO,             /* USB mode select */
-       MPP25_GPIO,             /* Fan rotation fail */
-       MPP26_GPIO,             /* USB device vbus */
-       MPP28_GPIO,             /* USB enable host vbus */
-       MPP29_GPIO,             /* Blue led (slow register) */
-       MPP30_GPIO,             /* Blue led (command register) */
-       MPP31_GPIO,             /* Board power off */
-       MPP32_GPIO,             /* Power button (0 = Released, 1 = Pushed) */
-       MPP33_GPO,              /* Fan speed (bit 2) */
-       0
-};
-
 #define NS2_GPIO_POWER_OFF     31
 
 static void ns2_power_off(void)
@@ -71,8 +35,6 @@ void __init ns2_init(void)
        /*
         * Basic setup. Needs to be called early.
         */
-       kirkwood_mpp_conf(ns2_mpp_config);
-
        if (of_machine_is_compatible("lacie,netspace_lite_v2") ||
            of_machine_is_compatible("lacie,netspace_mini_v2"))
                ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
index 5dcb369..99df4df 100644 (file)
@@ -1,6 +1,8 @@
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
        -I$(srctree)/arch/arm/plat-orion/include
 
+AFLAGS_coherency_ll.o          := -Wa,-march=armv7-a
+
 obj-y += system-controller.o
 obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o
 obj-$(CONFIG_SMP)                += platsmp.o headsmp.o
index 5c8e9ce..769c1fe 100644 (file)
@@ -397,6 +397,12 @@ static struct omap_board_mux board_mux[] __initdata = {
                  OMAP_PULL_ENA),
        OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
 
+       /* UART2 - BT/FM/GPS shared transport */
+       OMAP4_MUX(UART2_CTS,    OMAP_PIN_INPUT  | OMAP_MUX_MODE0),
+       OMAP4_MUX(UART2_RTS,    OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+       OMAP4_MUX(UART2_RX,     OMAP_PIN_INPUT  | OMAP_MUX_MODE0),
+       OMAP4_MUX(UART2_TX,     OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+
        { .reg_offset = OMAP_MUX_TERMINATOR },
 };
 
index 7e5febe..ab7e952 100644 (file)
@@ -1935,6 +1935,8 @@ int __init omap2420_clk_init(void)
                        omap2_init_clk_hw_omap_clocks(c->lk.clk);
        }
 
+       omap2xxx_clkt_vps_late_init();
+
        omap2_clk_disable_autoidle_all();
 
        omap2_clk_enable_init_clocks(enable_init_clks,
index eda079b..eb3dab6 100644 (file)
@@ -2050,6 +2050,8 @@ int __init omap2430_clk_init(void)
                        omap2_init_clk_hw_omap_clocks(c->lk.clk);
        }
 
+       omap2xxx_clkt_vps_late_init();
+
        omap2_clk_disable_autoidle_all();
 
        omap2_clk_enable_init_clocks(enable_init_clks,
index 5789a5e..a2cc046 100644 (file)
@@ -2026,14 +2026,13 @@ int __init omap4xxx_clk_init(void)
         * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
         * state when turning the ABE clock domain. Workaround this by
         * locking the ABE DPLL on boot.
+        * Lock the ABE DPLL in any case to avoid issues with audio.
         */
-       if (cpu_is_omap446x()) {
-               rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
-               if (!rc)
-                       rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
-               if (rc)
-                       pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-       }
+       rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
+       if (!rc)
+               rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
        return 0;
 }
index 5e304d0..626f3ea 100644 (file)
@@ -639,7 +639,7 @@ static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
        return cnt;
 }
 
-static void omap_init_ocp2scp(void)
+static void __init omap_init_ocp2scp(void)
 {
        struct omap_hwmod       *oh;
        struct platform_device  *pdev;
index 4c7566c..2a2cfa8 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/platform_data/omap_drm.h>
 
+#include "soc.h"
 #include "omap_device.h"
 #include "omap_hwmod.h"
 
@@ -56,7 +57,7 @@ static int __init omap_init_drm(void)
                        oh->name);
        }
 
-       platform_data.omaprev = GET_OMAP_REVISION();
+       platform_data.omaprev = GET_OMAP_TYPE;
 
        return platform_device_register(&omap_drm_device);
 
index 129d508..793f54a 100644 (file)
@@ -2132,8 +2132,12 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
         * currently reset very early during boot, before I2C is
         * available, so it doesn't seem that we have any choice in
         * the kernel other than to avoid resetting it.
+        *
+        * Also, McPDM needs to be configured to NO_IDLE mode when it
+        * is in used otherwise vital clocks will be gated which
+        * results 'slow motion' audio playback.
         */
-       .flags          = HWMOD_EXT_OPT_MAIN_CLK,
+       .flags          = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_mcpdm_irqs,
        .sdma_reqs      = omap44xx_mcpdm_sdma_reqs,
        .main_clk       = "mcpdm_fck",
index 691aa67..b8ad6e6 100644 (file)
@@ -165,15 +165,11 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,
        struct device_node *np;
 
        for_each_matching_node(np, match) {
-               if (!of_device_is_available(np)) {
-                       of_node_put(np);
+               if (!of_device_is_available(np))
                        continue;
-               }
 
-               if (property && !of_get_property(np, property, NULL)) {
-                       of_node_put(np);
+               if (property && !of_get_property(np, property, NULL))
                        continue;
-               }
 
                of_add_property(np, &device_disabled);
                return np;
index a611ad3..b6132aa 100644 (file)
        GPIO76_LCD_PCLK,        \
        GPIO77_LCD_BIAS
 
+/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
+#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
+#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
 
 extern int keypad_set_wake(unsigned int on);
 #endif /* __ASM_ARCH_MFP_PXA27X_H */
index 8047ee0..616cb87 100644 (file)
@@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
 EXPORT_SYMBOL(pxa27x_clear_otgph);
 
 static unsigned long ac97_reset_config[] = {
-       GPIO113_GPIO,
+       GPIO113_AC97_nRESET_GPIO_HIGH,
        GPIO113_AC97_nRESET,
-       GPIO95_GPIO,
+       GPIO95_AC97_nRESET_GPIO_HIGH,
        GPIO95_AC97_nRESET,
 };
 
index 553059f..755c0bb 100644 (file)
@@ -47,7 +47,7 @@ static struct spi_board_info wm1253_devs[] = {
                .bus_num        = 0,
                .chip_select    = 0,
                .mode           = SPI_MODE_0,
-               .irq            = S3C_EINT(5),
+               .irq            = S3C_EINT(4),
                .controller_data = &wm0010_spi_csinfo,
                .platform_data = &wm0010_pdata,
        },
index 7feb426..d2e1a16 100644 (file)
@@ -338,8 +338,10 @@ int __init s3c64xx_pm_init(void)
        for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
                pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
 
+#ifdef CONFIG_S3C_DEV_FB
        if (dev_get_platdata(&s3c_device_fb.dev))
                pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
+#endif
 
        return 0;
 }
index 6b2fb87..076c26d 100644 (file)
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
        void (*op)(const void *, size_t, int))
 {
+       unsigned long pfn;
+       size_t left = size;
+
+       pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+       offset %= PAGE_SIZE;
+
        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages.  But we still need to process highmem pages individually.
         * If highmem is not configured then the bulk of this loop gets
         * optimized out.
         */
-       size_t left = size;
        do {
                size_t len = left;
                void *vaddr;
 
+               page = pfn_to_page(pfn);
+
                if (PageHighMem(page)) {
-                       if (len + offset > PAGE_SIZE) {
-                               if (offset >= PAGE_SIZE) {
-                                       page += offset / PAGE_SIZE;
-                                       offset %= PAGE_SIZE;
-                               }
+                       if (len + offset > PAGE_SIZE)
                                len = PAGE_SIZE - offset;
-                       }
                        vaddr = kmap_high_get(page);
                        if (vaddr) {
                                vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                        op(vaddr, len, dir);
                }
                offset = 0;
-               page++;
+               pfn++;
                left -= len;
        } while (left);
 }
index 9f06102..ce328c7 100644 (file)
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
        },
        [MT_MEMORY_SO] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-                               L_PTE_MT_UNCACHED,
+                               L_PTE_MT_UNCACHED | L_PTE_XN,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
                                PMD_SECT_UNCACHED | PMD_SECT_XN,
index dd703ef..b178d44 100644 (file)
@@ -20,7 +20,7 @@
  */
 ENTRY(versatile_secondary_startup)
        mrc     p15, 0, r0, c0, c0, 5
-       and     r0, r0, #15
+       bic     r0, #0xff000000
        adr     r4, 1f
        ldmia   r4, {r5, r6}
        sub     r4, r4, r5
index cc926c9..323ce1a 100644 (file)
@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        add     r11, r4, #1             @ increment it
        str     r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
 
        __INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
index ea0349f..dd5e56f 100644 (file)
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
                                        @ else it's one 32-bit instruction, so
                                        @ always subtract 4 from the following
                                        @ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
        @ not recognised by VFP
 
        DBGSTR  "not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
index 801e2d7..32ac0ae 100644 (file)
@@ -1,4 +1,5 @@
 targets += dtbs
+targets += $(dtb-y)
 
 dtbs: $(addprefix $(obj)/, $(dtb-y))
 
index 07fea29..fe32c0e 100644 (file)
 
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+       *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_fpsimd_state elf_fpregset_t;
 
index 64b1339..e333a24 100644 (file)
@@ -24,7 +24,8 @@
 /*
  * Software defined PTE bits definition.
  */
-#define PTE_VALID              (_AT(pteval_t, 1) << 0) /* pte_present() check */
+#define PTE_VALID              (_AT(pteval_t, 1) << 0)
+#define PTE_PROT_NONE          (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
 #define PTE_FILE               (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
 #define PTE_DIRTY              (_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL            (_AT(pteval_t, 1) << 56)
@@ -60,9 +61,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 extern pgprot_t pgprot_default;
 
-#define _MOD_PROT(p, b)        __pgprot(pgprot_val(p) | (b))
+#define __pgprot_modify(prot,mask,bits) \
+       __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define _MOD_PROT(p, b)                __pgprot_modify(p, 0, b)
 
-#define PAGE_NONE              _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_NONE              __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
 #define PAGE_SHARED            _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED_EXEC       _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
 #define PAGE_COPY              _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -72,7 +76,7 @@ extern pgprot_t pgprot_default;
 #define PAGE_KERNEL            _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
 #define PAGE_KERNEL_EXEC       _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
 
-#define __PAGE_NONE            __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_NONE            __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
 #define __PAGE_SHARED          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define __PAGE_SHARED_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 #define __PAGE_COPY            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -125,16 +129,15 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)       (pte_val(pte) & PTE_VALID)
+#define pte_present(pte)       (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
 #define pte_dirty(pte)         (pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte)         (pte_val(pte) & PTE_AF)
 #define pte_special(pte)       (pte_val(pte) & PTE_SPECIAL)
 #define pte_write(pte)         (!(pte_val(pte) & PTE_RDONLY))
 #define pte_exec(pte)          (!(pte_val(pte) & PTE_UXN))
 
-#define pte_present_exec_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
-        (PTE_VALID | PTE_USER))
+#define pte_valid_user(pte) \
+       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -157,10 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
-       if (pte_present_exec_user(pte))
-               __sync_icache_dcache(pte, addr);
-       if (!pte_dirty(pte))
-               pte = pte_wrprotect(pte);
+       if (pte_valid_user(pte)) {
+               if (pte_exec(pte))
+                       __sync_icache_dcache(pte, addr);
+               if (!pte_dirty(pte))
+                       pte = pte_wrprotect(pte);
+       }
+
        set_pte(ptep, pte);
 }
 
@@ -170,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pte_huge(pte)          ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
 #define pte_mkhuge(pte)                (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
 
-#define __pgprot_modify(prot,mask,bits)                \
-       __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /*
@@ -264,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
+       const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+                             PTE_PROT_NONE | PTE_VALID;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
index 5843262..5ef47ba 100644 (file)
@@ -395,8 +395,13 @@ __SYSCALL(370, sys_name_to_handle_at)
 __SYSCALL(371, compat_sys_open_by_handle_at)
 __SYSCALL(372, compat_sys_clock_adjtime)
 __SYSCALL(373, sys_syncfs)
+__SYSCALL(374, compat_sys_sendmmsg)
+__SYSCALL(375, sys_setns)
+__SYSCALL(376, compat_sys_process_vm_readv)
+__SYSCALL(377, compat_sys_process_vm_writev)
+__SYSCALL(378, sys_ni_syscall)                 /* 378 for kcmp */
 
-#define __NR_compat_syscalls           374
+#define __NR_compat_syscalls           379
 
 /*
  * Compat syscall numbers used by the AArch64 kernel.
index c958cb8..6a389dc 100644 (file)
@@ -252,10 +252,6 @@ void update_vsyscall(struct timekeeper *tk)
 
 void update_vsyscall_tz(void)
 {
-       ++vdso_data->tb_seq_count;
-       smp_wmb();
        vdso_data->tz_minuteswest       = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime           = sys_tz.tz_dsttime;
-       smp_wmb();
-       ++vdso_data->tb_seq_count;
 }
index 8bf658d..f0a6d10 100644 (file)
@@ -73,8 +73,6 @@ ENTRY(__kernel_gettimeofday)
        /* If tz is NULL, return 0. */
        cbz     x1, 3f
        ldp     w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
-       seqcnt_read w9
-       seqcnt_check w9, 1b
        stp     w4, w5, [x1, #TZ_MINWEST]
 3:
        mov     x0, xzr
index 4265ff6..b7a5fff 100644 (file)
@@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
        read_unlock(&tasklist_lock);
 }
 
-static inline int
-thread_matches (struct task_struct *thread, unsigned long addr)
-{
-       unsigned long thread_rbs_end;
-       struct pt_regs *thread_regs;
-
-       if (ptrace_check_attach(thread, 0) < 0)
-               /*
-                * If the thread is not in an attachable state, we'll
-                * ignore it.  The net effect is that if ADDR happens
-                * to overlap with the portion of the thread's
-                * register backing store that is currently residing
-                * on the thread's kernel stack, then ptrace() may end
-                * up accessing a stale value.  But if the thread
-                * isn't stopped, that's a problem anyhow, so we're
-                * doing as well as we can...
-                */
-               return 0;
-
-       thread_regs = task_pt_regs(thread);
-       thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
-       if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
-               return 0;
-
-       return 1;       /* looks like we've got a winner */
-}
-
 /*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
index 17f7a45..3e6b844 100644 (file)
@@ -21,6 +21,22 @@ extern void *dma_alloc_coherent(struct device *, size_t,
 extern void dma_free_coherent(struct device *, size_t,
                              void *, dma_addr_t);
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *cpu_addr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *handle, gfp_t flag)
 {
index bf86b29..037028f 100644 (file)
@@ -64,6 +64,8 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define        VMALLOC_START   0
 #define        VMALLOC_END     0xffffffff
+#define        KMAP_START      0
+#define        KMAP_END        0xffffffff
 
 #include <asm-generic/pgtable.h>
 
index 847994c..f9337f6 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            348
+#define NR_syscalls            349
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index b94bfbf..625f321 100644 (file)
 #define __NR_process_vm_readv  345
 #define __NR_process_vm_writev 346
 #define __NR_kcmp              347
+#define __NR_finit_module      348
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index c30da5b..3f04ea0 100644 (file)
@@ -368,4 +368,5 @@ ENTRY(sys_call_table)
        .long sys_process_vm_readv      /* 345 */
        .long sys_process_vm_writev
        .long sys_kcmp
+       .long sys_finit_module
 
index f0e05bc..afd8106 100644 (file)
 void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+extern void init_pointer_table(unsigned long ptable);
+extern pmd_t *zero_pgtable;
+#endif
+
 #ifdef CONFIG_MMU
 
 pg_data_t pg_data_map[MAX_NUMNODES];
@@ -69,9 +74,6 @@ void __init m68k_setup_node(int node)
        node_set_online(node);
 }
 
-extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
-
 #else /* CONFIG_MMU */
 
 /*
index aa03f2e..e70001c 100644 (file)
@@ -6,6 +6,7 @@ config MN10300
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_KGDB
+       select GENERIC_ATOMIC64
        select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
        select GENERIC_CLOCKEVENTS
        select MODULES_USE_ELF_RELA
index bfb4424..eb7850b 100644 (file)
@@ -1865,7 +1865,7 @@ syscall_restore:
 
        /* Are we being ptraced? */
        ldw     TASK_FLAGS(%r1),%r19
-       ldi     (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+       ldi     _TIF_SYSCALL_TRACE_MASK,%r2
        and,COND(=)     %r19,%r2,%r0
        b,n     syscall_restore_rfi
 
@@ -1978,15 +1978,23 @@ syscall_restore_rfi:
        /* sr2 should be set to zero for userspace syscalls */
        STREG   %r0,TASK_PT_SR2(%r1)
 
-pt_regs_ok:
        LDREG   TASK_PT_GR31(%r1),%r2
-       depi    3,31,2,%r2                         /* ensure return to user mode. */
-       STREG   %r2,TASK_PT_IAOQ0(%r1)
+       depi    3,31,2,%r2                 /* ensure return to user mode. */
+       STREG   %r2,TASK_PT_IAOQ0(%r1)
        ldo     4(%r2),%r2
        STREG   %r2,TASK_PT_IAOQ1(%r1)
+       b       intr_restore
        copy    %r25,%r16
+
+pt_regs_ok:
+       LDREG   TASK_PT_IAOQ0(%r1),%r2
+       depi    3,31,2,%r2                 /* ensure return to user mode. */
+       STREG   %r2,TASK_PT_IAOQ0(%r1)
+       LDREG   TASK_PT_IAOQ1(%r1),%r2
+       depi    3,31,2,%r2
+       STREG   %r2,TASK_PT_IAOQ1(%r1)
        b       intr_restore
-       nop
+       copy    %r25,%r16
 
        .import schedule,code
 syscall_do_resched:
index c0b1aff..0299d63 100644 (file)
@@ -410,11 +410,13 @@ void __init init_IRQ(void)
 {
        local_irq_disable();    /* PARANOID - should already be disabled */
        mtctl(~0UL, 23);        /* EIRR : clear all pending external intr */
-       claim_cpu_irqs();
 #ifdef CONFIG_SMP
-       if (!cpu_eiem)
+       if (!cpu_eiem) {
+               claim_cpu_irqs();
                cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+       }
 #else
+       claim_cpu_irqs();
        cpu_eiem = EIEM_MASK(TIMER_IRQ);
 #endif
         set_eiem(cpu_eiem);    /* EIEM : enable all external intr */
index 857c2f5..534abd4 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/asm-offsets.h>
 
 /* PSW bits we allow the debugger to modify */
-#define USER_PSW_BITS  (PSW_N | PSW_V | PSW_CB)
+#define USER_PSW_BITS  (PSW_N | PSW_B | PSW_V | PSW_CB)
 
 /*
  * Called by kernel/ptrace.c when detaching..
index 5379969..fd05170 100644 (file)
@@ -190,8 +190,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
        DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
                        (unsigned long)ka, sp, frame_size);
        
+       /* Align alternate stack and reserve 64 bytes for the signal
+          handler's frame marker.  */
        if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
-               sp = current->sas_ss_sp; /* Stacks grow up! */
+               sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
 
        DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
        return (void __user *) sp; /* Stacks grow up.  Fun. */
index 9071e09..933423f 100644 (file)
     Sgl_isinexact_to_fix(sgl_value,exponent)
 
 #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB)  \
-  {Sall(sgl_value) <<= SGL_EXP_LENGTH;  /*  left-justify  */           \
+  {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH;               \
     if (exponent <= 31) {                                              \
-       Dintp1(dresultA) = 0;                                           \
-       Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \
+       Dintp1(dresultA) = 0;                                           \
+       Dintp2(dresultB) = val >> (31 - exponent);                      \
     }                                                                  \
     else {                                                             \
-       Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent);          \
-       Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31);          \
+       Dintp1(dresultA) = val >> (63 - exponent);                      \
+       Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0; \
     }                                                                  \
-    Sall(sgl_value) >>= SGL_EXP_LENGTH;  /* return to original */      \
   }
 
 #define Duint_setzero(dresultA,dresultB)       \
index ed0e025..e3af328 100644 (file)
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_HCALL_TOKEN(num)     _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
 
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE 1
 
index 35f3cf0..a353c48 100644 (file)
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
        unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
        struct opal_machine_check_event *opal_evt;
+#endif
        long handled = 1;
 
        if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
                handled = 0;
        }
 
+#ifdef CONFIG_PPC_POWERNV
        /*
         * See if OPAL has already handled the condition.
         * We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 
        if (handled)
                opal_evt->in_use = 0;
+#endif
 
        return handled;
 }
index b0855e5..9d9cddc 100644 (file)
@@ -39,6 +39,7 @@
 #define OP_31_XOP_TRAP      4
 #define OP_31_XOP_LWZX      23
 #define OP_31_XOP_TRAP_64   68
+#define OP_31_XOP_DCBF      86
 #define OP_31_XOP_LBZX      87
 #define OP_31_XOP_STWX      151
 #define OP_31_XOP_STBX      215
@@ -374,6 +375,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
                        break;
 
+               case OP_31_XOP_DCBF:
                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
index 4b8e08b..7e3ce78 100644 (file)
@@ -24,8 +24,8 @@ CHECKFLAGS    += -D__s390__ -msize-long
 else
 LD_BFD         := elf64-s390
 LDFLAGS                := -m elf64_s390
-KBUILD_AFLAGS_MODULE += -fpic -D__PIC__
-KBUILD_CFLAGS_MODULE += -fpic -D__PIC__
+KBUILD_AFLAGS_MODULE += -fPIC
+KBUILD_CFLAGS_MODULE += -fPIC
 KBUILD_CFLAGS  += -m64
 KBUILD_AFLAGS  += -m64
 UTS_MACHINE    := s390x
index de015d8..bb9bdcd 100644 (file)
  */
 #define MAX_DMA_ADDRESS         0x80000000
 
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy   (0)
+#endif
+
 #endif /* _ASM_S390_DMA_H */
index 16c3eb1..27cb321 100644 (file)
@@ -85,6 +85,11 @@ static inline void iounmap(volatile void __iomem *addr)
 #define __raw_writel   zpci_write_u32
 #define __raw_writeq   zpci_write_u64
 
+#define readb_relaxed  readb
+#define readw_relaxed  readw
+#define readl_relaxed  readl
+#define readq_relaxed  readq
+
 #endif /* CONFIG_PCI */
 
 #include <asm-generic/io.h>
index e6972f8..7def773 100644 (file)
@@ -2,43 +2,61 @@
 #define _ASM_IRQ_H
 
 #include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/cache.h>
 #include <linux/types.h>
 
-enum interruption_class {
+enum interruption_main_class {
        EXTERNAL_INTERRUPT,
        IO_INTERRUPT,
-       EXTINT_CLK,
-       EXTINT_EXC,
-       EXTINT_EMS,
-       EXTINT_TMR,
-       EXTINT_TLA,
-       EXTINT_PFL,
-       EXTINT_DSD,
-       EXTINT_VRT,
-       EXTINT_SCP,
-       EXTINT_IUC,
-       EXTINT_CMS,
-       EXTINT_CMC,
-       EXTINT_CMR,
-       IOINT_CIO,
-       IOINT_QAI,
-       IOINT_DAS,
-       IOINT_C15,
-       IOINT_C70,
-       IOINT_TAP,
-       IOINT_VMR,
-       IOINT_LCS,
-       IOINT_CLW,
-       IOINT_CTC,
-       IOINT_APB,
-       IOINT_ADM,
-       IOINT_CSC,
-       IOINT_PCI,
-       IOINT_MSI,
+       NR_IRQS
+};
+
+enum interruption_class {
+       IRQEXT_CLK,
+       IRQEXT_EXC,
+       IRQEXT_EMS,
+       IRQEXT_TMR,
+       IRQEXT_TLA,
+       IRQEXT_PFL,
+       IRQEXT_DSD,
+       IRQEXT_VRT,
+       IRQEXT_SCP,
+       IRQEXT_IUC,
+       IRQEXT_CMS,
+       IRQEXT_CMC,
+       IRQEXT_CMR,
+       IRQIO_CIO,
+       IRQIO_QAI,
+       IRQIO_DAS,
+       IRQIO_C15,
+       IRQIO_C70,
+       IRQIO_TAP,
+       IRQIO_VMR,
+       IRQIO_LCS,
+       IRQIO_CLW,
+       IRQIO_CTC,
+       IRQIO_APB,
+       IRQIO_ADM,
+       IRQIO_CSC,
+       IRQIO_PCI,
+       IRQIO_MSI,
        NMI_NMI,
-       NR_IRQS,
+       CPU_RST,
+       NR_ARCH_IRQS
 };
 
+struct irq_stat {
+       unsigned int irqs[NR_ARCH_IRQS];
+};
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+
+static __always_inline void inc_irq_stat(enum interruption_class irq)
+{
+       __get_cpu_var(irq_stat).irqs[irq]++;
+}
+
 struct ext_code {
        unsigned short subcode;
        unsigned short code;
index c928dc1..c1d7930 100644 (file)
@@ -1387,10 +1387,7 @@ static inline int has_transparent_hugepage(void)
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-       if (pmd_trans_huge(pmd))
-               return pmd_val(pmd) >> HPAGE_SHIFT;
-       else
-               return pmd_val(pmd) >> PAGE_SHIFT;
+       return pmd_val(pmd) >> PAGE_SHIFT;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
index fba4d66..4c060bb 100644 (file)
@@ -128,4 +128,32 @@ static inline unsigned long long get_clock_monotonic(void)
        return get_clock_xt() - sched_clock_base_cc;
 }
 
+/**
+ * tod_to_ns - convert a TOD format value to nanoseconds
+ * @todval: to be converted TOD format value
+ * Returns: number of nanoseconds that correspond to the TOD format value
+ *
+ * Converting a 64 Bit TOD format value to nanoseconds means that the value
+ * must be divided by 4.096. In order to achieve that we multiply with 125
+ * and divide by 512:
+ *
+ *    ns = (todval * 125) >> 9;
+ *
+ * In order to avoid an overflow with the multiplication we can rewrite this.
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * we end up with
+ *
+ *    ns = ((2^32 * th + tl) * 125 ) >> 9;
+ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ *
+ */
+static inline unsigned long long tod_to_ns(unsigned long long todval)
+{
+       unsigned long long ns;
+
+       ns = ((todval >> 32) << 23) * 125;
+       ns += ((todval & 0xffffffff) * 125) >> 9;
+       return ns;
+}
+
 #endif
index 63e6078..864f693 100644 (file)
 #define __NR_process_vm_writev 341
 #define __NR_s390_runtime_instr 342
 #define __NR_kcmp              343
-#define NR_syscalls 344
+#define __NR_finit_module      344
+#define NR_syscalls 345
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index 827e094..9b9a805 100644 (file)
@@ -1659,3 +1659,9 @@ ENTRY(sys_kcmp_wrapper)
        llgfr   %r5,%r5                 # unsigned long
        llgfr   %r6,%r6                 # unsigned long
        jg      sys_kcmp
+
+ENTRY(sys_finit_module_wrapper)
+       lgfr    %r2,%r2                 # int
+       llgtr   %r3,%r3                 # const char __user *
+       lgfr    %r4,%r4                 # int
+       jg      sys_finit_module
index ba500d8..4e8215e 100644 (file)
@@ -1127,13 +1127,14 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
        if (i == DEBUG_MAX_VIEWS) {
                pr_err("Registering view %s/%s would exceed the maximum "
                       "number of views %i\n", id->name, view->name, i);
-               debugfs_remove(pde);
                rc = -1;
        } else {
                id->views[i] = view;
                id->debugfs_entries[i] = pde;
        }
        spin_unlock_irqrestore(&id->lock, flags);
+       if (rc)
+               debugfs_remove(pde);
 out:
        return rc;
 }
@@ -1146,9 +1147,9 @@ EXPORT_SYMBOL(debug_register_view);
 int
 debug_unregister_view(debug_info_t * id, struct debug_view *view)
 {
-       int rc = 0;
-       int i;
+       struct dentry *dentry = NULL;
        unsigned long flags;
+       int i, rc = 0;
 
        if (!id)
                goto out;
@@ -1160,10 +1161,12 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
        if (i == DEBUG_MAX_VIEWS)
                rc = -1;
        else {
-               debugfs_remove(id->debugfs_entries[i]);
+               dentry = id->debugfs_entries[i];
                id->views[i] = NULL;
+               id->debugfs_entries[i] = NULL;
        }
        spin_unlock_irqrestore(&id->lock, flags);
+       debugfs_remove(dentry);
 out:
        return rc;
 }
index bf24293..9df824e 100644 (file)
 #include <asm/irq.h>
 #include "entry.h"
 
+DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
+
 struct irq_class {
        char *name;
        char *desc;
 };
 
-static const struct irq_class intrclass_names[] = {
+/*
+ * The list of "main" irq classes on s390. This is the list of interrrupts
+ * that appear both in /proc/stat ("intr" line) and /proc/interrupts.
+ * Historically only external and I/O interrupts have been part of /proc/stat.
+ * We can't add the split external and I/O sub classes since the first field
+ * in the "intr" line in /proc/stat is supposed to be the sum of all other
+ * fields.
+ * Since the external and I/O interrupt fields are already sums we would end
+ * up with having a sum which accounts each interrupt twice.
+ */
+static const struct irq_class irqclass_main_desc[NR_IRQS] = {
        [EXTERNAL_INTERRUPT] = {.name = "EXT"},
-       [IO_INTERRUPT]       = {.name = "I/O"},
-       [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
-       [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
-       [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
-       [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
-       [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
-       [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
-       [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
-       [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
-       [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
-       [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
-       [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
-       [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
-       [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
-       [IOINT_CIO]  = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
-       [IOINT_QAI]  = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
-       [IOINT_DAS]  = {.name = "DAS", .desc = "[I/O] DASD"},
-       [IOINT_C15]  = {.name = "C15", .desc = "[I/O] 3215"},
-       [IOINT_C70]  = {.name = "C70", .desc = "[I/O] 3270"},
-       [IOINT_TAP]  = {.name = "TAP", .desc = "[I/O] Tape"},
-       [IOINT_VMR]  = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
-       [IOINT_LCS]  = {.name = "LCS", .desc = "[I/O] LCS"},
-       [IOINT_CLW]  = {.name = "CLW", .desc = "[I/O] CLAW"},
-       [IOINT_CTC]  = {.name = "CTC", .desc = "[I/O] CTC"},
-       [IOINT_APB]  = {.name = "APB", .desc = "[I/O] AP Bus"},
-       [IOINT_ADM]  = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
-       [IOINT_CSC]  = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
-       [IOINT_PCI]  = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
-       [IOINT_MSI] =  {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
+       [IO_INTERRUPT]       = {.name = "I/O"}
+};
+
+/*
+ * The list of split external and I/O interrupts that appear only in
+ * /proc/interrupts.
+ * In addition this list contains non external / I/O events like NMIs.
+ */
+static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
+       [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
+       [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
+       [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
+       [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
+       [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
+       [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
+       [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
+       [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
+       [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
+       [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
+       [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
+       [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
+       [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
+       [IRQIO_CIO]  = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
+       [IRQIO_QAI]  = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
+       [IRQIO_DAS]  = {.name = "DAS", .desc = "[I/O] DASD"},
+       [IRQIO_C15]  = {.name = "C15", .desc = "[I/O] 3215"},
+       [IRQIO_C70]  = {.name = "C70", .desc = "[I/O] 3270"},
+       [IRQIO_TAP]  = {.name = "TAP", .desc = "[I/O] Tape"},
+       [IRQIO_VMR]  = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
+       [IRQIO_LCS]  = {.name = "LCS", .desc = "[I/O] LCS"},
+       [IRQIO_CLW]  = {.name = "CLW", .desc = "[I/O] CLAW"},
+       [IRQIO_CTC]  = {.name = "CTC", .desc = "[I/O] CTC"},
+       [IRQIO_APB]  = {.name = "APB", .desc = "[I/O] AP Bus"},
+       [IRQIO_ADM]  = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
+       [IRQIO_CSC]  = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+       [IRQIO_PCI]  = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
+       [IRQIO_MSI]  = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
        [NMI_NMI]    = {.name = "NMI", .desc = "[NMI] Machine Check"},
+       [CPU_RST]    = {.name = "RST", .desc = "[CPU] CPU Restart"},
 };
 
 /*
@@ -68,30 +90,34 @@ static const struct irq_class intrclass_names[] = {
  */
 int show_interrupts(struct seq_file *p, void *v)
 {
-       int i = *(loff_t *) v, j;
+       int irq = *(loff_t *) v;
+       int cpu;
 
        get_online_cpus();
-       if (i == 0) {
+       if (irq == 0) {
                seq_puts(p, "           ");
-               for_each_online_cpu(j)
-                       seq_printf(p, "CPU%d       ",j);
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "CPU%d       ", cpu);
                seq_putc(p, '\n');
        }
-
-       if (i < NR_IRQS) {
-               seq_printf(p, "%s: ", intrclass_names[i].name);
-#ifndef CONFIG_SMP
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-               for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-               if (intrclass_names[i].desc)
-                       seq_printf(p, "  %s", intrclass_names[i].desc);
-                seq_putc(p, '\n');
-        }
+       if (irq < NR_IRQS) {
+               seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+               seq_putc(p, '\n');
+               goto skip_arch_irqs;
+       }
+       for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
+               seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
+               if (irqclass_sub_desc[irq].desc)
+                       seq_printf(p, "  %s", irqclass_sub_desc[irq].desc);
+               seq_putc(p, '\n');
+       }
+skip_arch_irqs:
        put_online_cpus();
-        return 0;
+       return 0;
 }
 
 /*
@@ -222,7 +248,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
                /* Serve timer interrupts first. */
                clock_comparator_work();
        }
-       kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+       kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
        if (ext_code.code != 0x1004)
                __get_cpu_var(s390_idle).nohz_delay = 1;
 
index a6daa5c..7918fbe 100644 (file)
@@ -254,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
        int umode;
 
        nmi_enter();
-       kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
+       inc_irq_stat(NMI_NMI);
        mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
        mcck = &__get_cpu_var(cpu_mcck);
        umode = user_mode(regs);
index c4e7269..86ec744 100644 (file)
@@ -229,7 +229,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
        if (!(alert & CPU_MF_INT_CF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
+       inc_irq_stat(IRQEXT_CMC);
        cpuhw = &__get_cpu_var(cpu_hw_events);
 
        /* Measurement alerts are shared and might happen when the PMU
index 61066f6..077a993 100644 (file)
@@ -71,7 +71,7 @@ static void runtime_instr_int_handler(struct ext_code ext_code,
        if (!(param32 & CPU_MF_INT_RI_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
+       inc_irq_stat(IRQEXT_CMR);
 
        if (!current->thread.ri_cb)
                return;
index 2568590..a5360de 100644 (file)
@@ -16,7 +16,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/memblock.h>
@@ -289,6 +289,7 @@ void machine_power_off(void)
  * Dummy power off function.
  */
 void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL_GPL(pm_power_off);
 
 static int __init early_parse_mem(char *p)
 {
index 0b45baa..7433a2f 100644 (file)
@@ -433,9 +433,9 @@ static void do_ext_call_interrupt(struct ext_code ext_code,
 
        cpu = smp_processor_id();
        if (ext_code.code == 0x1202)
-               kstat_cpu(cpu).irqs[EXTINT_EXC]++;
+               inc_irq_stat(IRQEXT_EXC);
        else
-               kstat_cpu(cpu).irqs[EXTINT_EMS]++;
+               inc_irq_stat(IRQEXT_EMS);
        /*
         * handle bit signal external calls
         */
@@ -623,9 +623,10 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
        return info;
 }
 
-static int smp_add_present_cpu(int cpu);
+static int __cpuinit smp_add_present_cpu(int cpu);
 
-static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
+static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
+                                      int sysfs_add)
 {
        struct pcpu *pcpu;
        cpumask_t avail;
@@ -708,6 +709,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
        pfault_init();
        notify_cpu_starting(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
+       inc_irq_stat(CPU_RST);
        local_irq_enable();
        /* cpu_idle will call schedule for us */
        cpu_idle();
@@ -985,7 +987,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
        return notifier_from_errno(err);
 }
 
-static int smp_add_present_cpu(int cpu)
+static int __cpuinit smp_add_present_cpu(int cpu)
 {
        struct cpu *c = &pcpu_devices[cpu].cpu;
        struct device *s = &c->dev;
index 4817485..6a6c61f 100644 (file)
@@ -352,3 +352,4 @@ SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wr
 SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
 SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
 SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
+SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)
index 7fcd690..a5f4f5a 100644 (file)
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
  */
 unsigned long long notrace __kprobes sched_clock(void)
 {
-       return (get_clock_monotonic() * 125) >> 9;
+       return tod_to_ns(get_clock_monotonic());
 }
 
 /*
@@ -168,7 +168,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
                                       unsigned int param32,
                                       unsigned long param64)
 {
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
+       inc_irq_stat(IRQEXT_CLK);
        if (S390_lowcore.clock_comparator == -1ULL)
                set_clock_comparator(S390_lowcore.clock_comparator);
 }
@@ -179,7 +179,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
 static void timing_alert_interrupt(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
 {
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
+       inc_irq_stat(IRQEXT_TLA);
        if (param32 & 0x00c40000)
                etr_timing_alert((struct etr_irq_parm *) &param32);
        if (param32 & 0x00038000)
index f1aba87..4b2e3e3 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -42,6 +43,7 @@ static struct mask_info socket_info;
 static struct mask_info book_info;
 
 struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
index c30615e..82c481d 100644 (file)
@@ -408,7 +408,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
+       sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
index c9011bf..f090e81 100644 (file)
@@ -613,7 +613,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                kvm_s390_deliver_pending_interrupts(vcpu);
 
        vcpu->arch.sie_block->icptcode = 0;
+       preempt_disable();
        kvm_guest_enter();
+       preempt_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
index 42601d6..2fb9e63 100644 (file)
@@ -569,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code,
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
+       inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = sizeof(void *) == 4 ? param32 : param64;
        rcu_read_lock();
index 0cb385d..b5b2916 100644 (file)
@@ -233,7 +233,7 @@ static void hws_ext_handler(struct ext_code ext_code,
        if (!(param32 & CPU_MF_INT_SF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
+       inc_irq_stat(IRQEXT_CMS);
        atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 
        if (hws_wq)
index ff49427..60e0372 100644 (file)
@@ -160,35 +160,6 @@ int pci_proc_domain(struct pci_bus *bus)
 }
 EXPORT_SYMBOL_GPL(pci_proc_domain);
 
-/* Store PCI function information block */
-static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
-{
-       struct zpci_fib *fib;
-       u8 status, cc;
-
-       fib = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!fib)
-               return -ENOMEM;
-
-       do {
-               cc = __stpcifc(zdev->fh, 0, fib, &status);
-               if (cc == 2) {
-                       msleep(ZPCI_INSN_BUSY_DELAY);
-                       memset(fib, 0, PAGE_SIZE);
-               }
-       } while (cc == 2);
-
-       if (cc)
-               pr_err_once("%s: cc: %u  status: %u\n",
-                           __func__, cc, status);
-
-       /* Return PCI function controls */
-       *fc = fib->fc;
-
-       free_page((unsigned long) fib);
-       return (cc) ? -EIO : 0;
-}
-
 /* Modify PCI: Register adapter interruptions */
 static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
                              u64 aibv)
@@ -469,7 +440,7 @@ static void zpci_irq_handler(void *dont, void *need)
        int rescan = 0, max = aisb_max;
        struct zdev_irq_map *imap;
 
-       kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
+       inc_irq_stat(IRQIO_PCI);
        sbit = start;
 
 scan:
@@ -481,7 +452,7 @@ scan:
                /* find vector bit */
                imap = bucket->imap[sbit];
                for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
+                       inc_irq_stat(IRQIO_MSI);
                        clear_bit(63 - mbit, &imap->aibv);
 
                        spin_lock(&imap->lock);
index 6138468..a547419 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
 
-static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
-
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
 
index 3fede45..a0fa579 100644 (file)
  *                                  OFF-ON : MMC
  */
 
+/*
+ * FSI - DA7210
+ *
+ * it needs amixer settings for playing
+ *
+ * amixer set 'HeadPhone' 80
+ * amixer set 'Out Mixer Left DAC Left' on
+ * amixer set 'Out Mixer Right DAC Right' on
+ */
+
 /* Heartbeat */
 static unsigned char led_pos[] = { 0, 1, 2, 3 };
 
index 37924af..bf9f44f 100644 (file)
@@ -203,9 +203,9 @@ extern void __kernel_vsyscall;
        if (vdso_enabled)                                       \
                NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);        \
        else                                                    \
-               NEW_AUX_ENT(AT_IGNORE, 0);
+               NEW_AUX_ENT(AT_IGNORE, 0)
 #else
-#define VSYSCALL_AUX_ENT
+#define VSYSCALL_AUX_ENT       NEW_AUX_ENT(AT_IGNORE, 0)
 #endif /* CONFIG_VSYSCALL */
 
 #ifdef CONFIG_SH_FPU
index b1320d5..e699a12 100644 (file)
@@ -39,7 +39,7 @@
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE     (TASK_SIZE / 3)
+#define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE / 3)
 
 /*
  * Bit of SR register
index 1ee8946..1cc7d31 100644 (file)
@@ -47,7 +47,7 @@ pc; })
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE     (TASK_SIZE / 3)
+#define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE / 3)
 
 /*
  * Bit of SR register
index 9e465f2..d13a1d6 100644 (file)
 #define __NR_process_vm_readv  365
 #define __NR_process_vm_writev 366
 #define __NR_kcmp              367
+#define __NR_finit_module      368
 
-#define NR_syscalls 368
+#define NR_syscalls 369
 
 #endif /* __ASM_SH_UNISTD_32_H */
index 8e3a2ed..e6820c8 100644 (file)
 #define __NR_process_vm_readv  376
 #define __NR_process_vm_writev 377
 #define __NR_kcmp              378
+#define __NR_finit_module      379
 
-#define NR_syscalls 379
+#define NR_syscalls 380
 
 #endif /* __ASM_SH_UNISTD_64_H */
index fe97ae5..734234b 100644 (file)
@@ -385,3 +385,4 @@ ENTRY(sys_call_table)
        .long sys_process_vm_readv      /* 365 */
        .long sys_process_vm_writev
        .long sys_kcmp
+       .long sys_finit_module
index 5c7b1c6..579fcb9 100644 (file)
@@ -405,3 +405,4 @@ sys_call_table:
        .long sys_process_vm_readv
        .long sys_process_vm_writev
        .long sys_kcmp
+       .long sys_finit_module
index 60164e6..52aa201 100644 (file)
@@ -294,6 +294,8 @@ stack_panic:
        .align 2
 .L_init_thread_union:
        .long   init_thread_union
+.L_ebss:
+       .long   __bss_stop
 .Lpanic:
        .long   panic
 .Lpanic_s:
index cac719d..62ced58 100644 (file)
 #define __NR_process_vm_writev 339
 #define __NR_kern_features     340
 #define __NR_kcmp              341
+#define __NR_finit_module      342
 
-#define NR_syscalls            342
+#define NR_syscalls            343
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 04bacce..baf4366 100644 (file)
@@ -378,7 +378,8 @@ static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
 /* Cook up fake bus resources for SUNW,simba PCI bridges which lack
  * a proper 'ranges' property.
  */
-static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
+static void apb_fake_ranges(struct pci_dev *dev,
+                           struct pci_bus *bus,
                            struct pci_pbm_info *pbm)
 {
        struct pci_bus_region region;
@@ -403,13 +404,15 @@ static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
        pcibios_bus_to_resource(dev, res, &region);
 }
 
-static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
+static void pci_of_scan_bus(struct pci_pbm_info *pbm,
+                           struct device_node *node,
                            struct pci_bus *bus);
 
 #define GET_64BIT(prop, i)     ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
 
 static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
-                              struct device_node *node, struct pci_dev *dev)
+                              struct device_node *node,
+                              struct pci_dev *dev)
 {
        struct pci_bus *bus;
        const u32 *busrange, *ranges;
@@ -500,7 +503,8 @@ after_ranges:
        pci_of_scan_bus(pbm, node, bus);
 }
 
-static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
+static void pci_of_scan_bus(struct pci_pbm_info *pbm,
+                           struct device_node *node,
                            struct pci_bus *bus)
 {
        struct device_node *child;
index b852382..c647634 100644 (file)
@@ -366,7 +366,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
        pci_config_write8(addr, 64);
 }
 
-static void psycho_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
+static void psycho_scan_bus(struct pci_pbm_info *pbm,
+                           struct device *parent)
 {
        pbm_config_busmastering(pbm);
        pbm->is_66mhz_capable = 0;
index 531186d..6f00d27 100644 (file)
@@ -442,7 +442,8 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
        sabre_register_error_handlers(pbm);
 }
 
-static void sabre_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op)
+static void sabre_pbm_init(struct pci_pbm_info *pbm,
+                          struct platform_device *op)
 {
        psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
        pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;
index 29e8881..8f76f23 100644 (file)
@@ -1306,8 +1306,9 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
        }
 }
 
-static int schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op,
-                          u32 portid, int chip_type)
+static int schizo_pbm_init(struct pci_pbm_info *pbm,
+                          struct platform_device *op, u32 portid,
+                          int chip_type)
 {
        const struct linux_prom64_registers *regs;
        struct device_node *dp = op->dev.of_node;
index 5147f57..6ac43c3 100644 (file)
@@ -85,4 +85,4 @@ sys_call_table:
 /*325*/        .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/        .long sys_ni_syscall, sys_kcmp
+/*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module
index cdbd9b8..1009ecb 100644 (file)
@@ -86,7 +86,7 @@ sys_call_table32:
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/        .word sys_kern_features, sys_kcmp
+/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
 
 #endif /* CONFIG_COMPAT */
 
@@ -164,4 +164,4 @@ sys_call_table:
        .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/        .word sys_kern_features, sys_kcmp
+/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
index b1942e2..18e329c 100644 (file)
@@ -302,7 +302,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                if (status != EFI_SUCCESS)
                        continue;
 
-               if (!attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)
+               if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
                        continue;
 
                if (!pci->romimage || !pci->romsize)
index 4428fd1..6774c17 100644 (file)
@@ -340,9 +340,6 @@ int x86_setup_perfctr(struct perf_event *event)
                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
-
-               if (!attr->exclude_guest)
-                       return -EOPNOTSUPP;
        }
 
        hwc->config |= config;
@@ -385,9 +382,6 @@ int x86_pmu_hw_config(struct perf_event *event)
        if (event->attr.precise_ip) {
                int precise = 0;
 
-               if (!event->attr.exclude_guest)
-                       return -EOPNOTSUPP;
-
                /* Support for constant skid */
                if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
                        precise++;
index ff84d54..6ed91d9 100644 (file)
@@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback)
        lea 16(%esp),%esp
        CFI_ADJUST_CFA_OFFSET -16
        jz 5f
-       addl $16,%esp
        jmp iret_exc
 5:     pushl_cfi $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
index 08b973f..9c2bd8b 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
 
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
 
+       rcu_irq_enter();
+
        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
+
+               rcu_irq_exit();
                return;
        }
 
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
                        /*
                         * We cannot reschedule. So halt.
                         */
+                       rcu_irq_exit();
                        native_safe_halt();
+                       rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);
 
+       rcu_irq_exit();
        return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
-               rcu_irq_enter();
+               exception_enter(regs);
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
-               rcu_irq_exit();
+               exception_exit(regs);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
index 23ddd55..00f6c14 100644 (file)
@@ -610,6 +610,83 @@ static __init void reserve_ibft_region(void)
 
 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
 
+static bool __init snb_gfx_workaround_needed(void)
+{
+#ifdef CONFIG_PCI
+       int i;
+       u16 vendor, devid;
+       static const __initconst u16 snb_ids[] = {
+               0x0102,
+               0x0112,
+               0x0122,
+               0x0106,
+               0x0116,
+               0x0126,
+               0x010a,
+       };
+
+       /* Assume no if something weird is going on with PCI */
+       if (!early_pci_allowed())
+               return false;
+
+       vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
+       if (vendor != 0x8086)
+               return false;
+
+       devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
+       for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
+               if (devid == snb_ids[i])
+                       return true;
+#endif
+
+       return false;
+}
+
+/*
+ * Sandy Bridge graphics has trouble with certain ranges, exclude
+ * them from allocation.
+ */
+static void __init trim_snb_memory(void)
+{
+       static const __initconst unsigned long bad_pages[] = {
+               0x20050000,
+               0x20110000,
+               0x20130000,
+               0x20138000,
+               0x40004000,
+       };
+       int i;
+
+       if (!snb_gfx_workaround_needed())
+               return;
+
+       printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
+
+       /*
+        * Reserve all memory below the 1 MB mark that has not
+        * already been reserved.
+        */
+       memblock_reserve(0, 1<<20);
+       
+       for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
+               if (memblock_reserve(bad_pages[i], PAGE_SIZE))
+                       printk(KERN_WARNING "failed to reserve 0x%08lx\n",
+                              bad_pages[i]);
+       }
+}
+
+/*
+ * Here we put platform-specific memory range workarounds, i.e.
+ * memory known to be corrupt or otherwise in need to be reserved on
+ * specific platforms.
+ *
+ * If this gets used more widely it could use a real dispatch mechanism.
+ */
+static void __init trim_platform_memory_ranges(void)
+{
+       trim_snb_memory();
+}
+
 static void __init trim_bios_range(void)
 {
        /*
@@ -630,6 +707,7 @@ static void __init trim_bios_range(void)
         * take them out.
         */
        e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
+
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
 
@@ -908,6 +986,8 @@ void __init setup_arch(char **cmdline_p)
 
        setup_real_mode();
 
+       trim_platform_memory_ranges();
+
        init_gbpages();
 
        /* max_pfn_mapped is updated here */
index cd3b243..9b4d51d 100644 (file)
@@ -165,10 +165,11 @@ void set_task_blockstep(struct task_struct *task, bool on)
         * Ensure irq/preemption can't change debugctl in between.
         * Note also that both TIF_BLOCKSTEP and debugctl should
         * be changed atomically wrt preemption.
-        * FIXME: this means that set/clear TIF_BLOCKSTEP is simply
-        * wrong if task != current, SIGKILL can wakeup the stopped
-        * tracee and set/clear can play with the running task, this
-        * can confuse the next __switch_to_xtra().
+        *
+        * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
+        * task is current or it can't be running, otherwise we can race
+        * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
+        * PTRACE_KILL is not safe.
         */
        local_irq_disable();
        debugctl = get_debugctlmsr();
index 76f5446..c243b81 100644 (file)
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 
 static void shared_msr_update(unsigned slot, u32 msr)
 {
-       struct kvm_shared_msrs *smsr;
        u64 value;
+       unsigned int cpu = smp_processor_id();
+       struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
-       smsr = &__get_cpu_var(shared_msrs);
        /* only read, and nobody should modify it at this time,
         * so don't need lock */
        if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
-       struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+       unsigned int cpu = smp_processor_id();
+       struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
        if (((value ^ smsr->values[slot].curr) & mask) == 0)
                return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
 static void drop_user_return_notifiers(void *ignore)
 {
-       struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+       unsigned int cpu = smp_processor_id();
+       struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
        if (smsr->registered)
                kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
                goto out;
        }
 
+       r = -ENOMEM;
+       shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+       if (!shared_msrs) {
+               printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+               goto out;
+       }
+
        r = kvm_mmu_module_init();
        if (r)
-               goto out;
+               goto out_free_percpu;
 
        kvm_set_mmio_spte_mask();
        kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
 
        return 0;
 
+out_free_percpu:
+       free_percpu(shared_msrs);
 out:
        return r;
 }
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
 #endif
        kvm_x86_ops = NULL;
        kvm_mmu_module_exit();
+       free_percpu(shared_msrs);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
index 4f7d259..34bc4ce 100644 (file)
@@ -432,13 +432,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
-       /*
-        * Balance out the preempt calls - as we are running in cpu_idle
-        * loop which has been called at bootup from cpu_bringup_and_idle.
-        * The cpucpu_bringup_and_idle called cpu_bringup which made a
-        * preempt_disable() So this preempt_enable will balance it out.
-        */
-       preempt_enable();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
index f5fb072..c13044c 100644 (file)
@@ -150,6 +150,8 @@ source "drivers/memory/Kconfig"
 
 source "drivers/iio/Kconfig"
 
+source "drivers/ntb/Kconfig"
+
 source "drivers/vme/Kconfig"
 
 source "drivers/pwm/Kconfig"
index 7863b9f..3d92e12 100644 (file)
@@ -146,3 +146,4 @@ obj-$(CONFIG_MEMORY)                += memory/
 obj-$(CONFIG_IIO)              += iio/
 obj-$(CONFIG_VME_BUS)          += vme/
 obj-$(CONFIG_IPACK_BUS)                += ipack/
+obj-$(CONFIG_NTB)              += ntb/
index 00a7836..46f80e2 100644 (file)
@@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
        if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
            *access_bit_width < 32)
                *access_bit_width = 32;
+       else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
+           *access_bit_width < 64)
+               *access_bit_width = 64;
 
        if ((bit_width + bit_offset) > *access_bit_width) {
                pr_warning(FW_BUG APEI_PFX
index 95af6f6..35da181 100644 (file)
@@ -297,7 +297,7 @@ static int acpi_platform_notify(struct device *dev)
        if (!ret) {
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
-               acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
+               acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
                DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
                kfree(buffer.pointer);
        } else
index f1a5da4..ed9a1cc 100644 (file)
@@ -958,6 +958,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
                return -EINVAL;
        }
 
+       if (!dev)
+               return -EINVAL;
+
        dev->cpu = pr->id;
 
        if (max_cstate == 0)
@@ -1149,6 +1152,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                }
 
                /* Populate Updated C-state information */
+               acpi_processor_get_power_info(pr);
                acpi_processor_setup_cpuidle_states(pr);
 
                /* Enable all cpuidle devices */
index 836bfe0..53e7ac9 100644 (file)
@@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
        if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
            || boot_cpu_data.x86 == 0x11) {
                rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+               /*
+                * MSR C001_0064+:
+                * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
+                */
+               if (!(hi & BIT(31)))
+                       return;
+
                fid = lo & 0x3f;
                did = (lo >> 6) & 7;
                if (boot_cpu_data.x86 == 0x10)
index 7862d17..4979127 100644 (file)
@@ -53,6 +53,7 @@
 
 enum {
        AHCI_PCI_BAR_STA2X11    = 0,
+       AHCI_PCI_BAR_ENMOTUS    = 2,
        AHCI_PCI_BAR_STANDARD   = 5,
 };
 
@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },   /* ASM1061 */
        { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
+       /* Enmotus */
+       { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
        /* Generic, PCI class code for AHCI */
        { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1098,9 +1102,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev,
                         "PDC42819 can only drive SATA devices with this driver\n");
 
-       /* The Connext uses non-standard BAR */
+       /* Both Connext and Enmotus devices use non-standard BARs */
        if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
                ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+       else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+               ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
 
        /* acquire resources */
        rc = pcim_enable_device(pdev);
index 320712a..6cd7805 100644 (file)
@@ -1951,13 +1951,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
        /* Use the nominal value 10 ms if the read MDAT is zero,
         * the nominal value of DETO is 20 ms.
         */
-       if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] &
+       if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
            ATA_LOG_DEVSLP_VALID_MASK) {
-               mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] &
+               mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
                       ATA_LOG_DEVSLP_MDAT_MASK;
                if (!mdat)
                        mdat = 10;
-               deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO];
+               deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
                if (!deto)
                        deto = 20;
        } else {
index 9e8b99a..46cd3f4 100644 (file)
@@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)
                        }
                }
 
-               /* check and mark DevSlp capability */
-               if (ata_id_has_devslp(dev->id))
-                       dev->flags |= ATA_DFLAG_DEVSLP;
-
-               /* Obtain SATA Settings page from Identify Device Data Log,
-                * which contains DevSlp timing variables etc.
-                * Exclude old devices with ata_id_has_ncq()
+               /* Check and mark DevSlp capability. Get DevSlp timing variables
+                * from SATA Settings page of Identify Device Data Log.
                 */
-               if (ata_id_has_ncq(dev->id)) {
+               if (ata_id_has_devslp(dev->id)) {
+                       u8 sata_setting[ATA_SECT_SIZE];
+                       int i, j;
+
+                       dev->flags |= ATA_DFLAG_DEVSLP;
                        err_mask = ata_read_log_page(dev,
                                                     ATA_LOG_SATA_ID_DEV_DATA,
                                                     ATA_LOG_SATA_SETTINGS,
-                                                    dev->sata_settings,
+                                                    sata_setting,
                                                     1);
                        if (err_mask)
                                ata_dev_dbg(dev,
                                            "failed to get Identify Device Data, Emask 0x%x\n",
                                            err_mask);
+                       else
+                               for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+                                       j = ATA_LOG_DEVSLP_OFFSET + i;
+                                       dev->devslp_timing[i] = sata_setting[j];
+                               }
                }
 
                dev->cdb_len = 16;
index bf039b0..bcf4437 100644 (file)
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
  */
 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
 {
-       if (qc->flags & AC_ERR_MEDIA)
+       if (qc->err_mask & AC_ERR_MEDIA)
                return 0;       /* don't retry media errors */
        if (qc->flags & ATA_QCFLAG_IO)
                return 1;       /* otherwise retry anything from fs stack */
index 6345294..fb10728 100644 (file)
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
-        * on the linux-kerenl list, you have been warned.
+        * on the linux-kernel list, you have been warned.
         */
 }
 
index d814603..b392b35 100644 (file)
@@ -305,7 +305,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
        char *buf;
 
        size = fw_file_size(file);
-       if (size < 0)
+       if (size <= 0)
                return false;
        buf = vmalloc(size);
        if (!buf)
index 07aad78..d9a6c94 100644 (file)
@@ -56,6 +56,19 @@ static const struct file_operations regmap_name_fops = {
        .llseek = default_llseek,
 };
 
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+       struct regmap_debugfs_off_cache *c;
+
+       while (!list_empty(&map->debugfs_off_cache)) {
+               c = list_first_entry(&map->debugfs_off_cache,
+                                    struct regmap_debugfs_off_cache,
+                                    list);
+               list_del(&c->list);
+               kfree(c);
+       }
+}
+
 /*
  * Work out where the start offset maps into register numbers, bearing
  * in mind that we suppress hidden registers.
@@ -91,8 +104,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                        /* No cache entry?  Start a new one */
                        if (!c) {
                                c = kzalloc(sizeof(*c), GFP_KERNEL);
-                               if (!c)
-                                       break;
+                               if (!c) {
+                                       regmap_debugfs_free_dump_cache(map);
+                                       return base;
+                               }
                                c->min = p;
                                c->base_reg = i;
                        }
@@ -101,14 +116,32 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                }
        }
 
+       /* Close the last entry off if we didn't scan beyond it */
+       if (c) {
+               c->max = p - 1;
+               list_add_tail(&c->list,
+                             &map->debugfs_off_cache);
+       }
+
+       /*
+        * This should never happen; we return above if we fail to
+        * allocate and we should never be in this code if there are
+        * no registers at all.
+        */
+       if (list_empty(&map->debugfs_off_cache)) {
+               WARN_ON(list_empty(&map->debugfs_off_cache));
+               return base;
+       }
+
        /* Find the relevant block */
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
-               if (*pos >= c->min && *pos <= c->max) {
+               if (from >= c->min && from <= c->max) {
                        *pos = c->min;
                        return c->base_reg;
                }
 
-               ret = c->max;
+               *pos = c->min;
+               ret = c->base_reg;
        }
 
        return ret;
@@ -387,16 +420,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 
 void regmap_debugfs_exit(struct regmap *map)
 {
-       struct regmap_debugfs_off_cache *c;
-
        debugfs_remove_recursive(map->debugfs);
-       while (!list_empty(&map->debugfs_off_cache)) {
-               c = list_first_entry(&map->debugfs_off_cache,
-                                    struct regmap_debugfs_off_cache,
-                                    list);
-               list_del(&c->list);
-               kfree(c);
-       }
+       regmap_debugfs_free_dump_cache(map);
        kfree(map->debugfs_name);
 }
 
index 42d5cb0..f00b059 100644 (file)
@@ -1106,7 +1106,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
  * @val_count: Number of registers to write
  *
  * This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
+ * data to the device either in single transfer or multiple transfer.
  *
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
index 9d8409c..8ad21a2 100644 (file)
@@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
+       int refc;
 
        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
@@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev)
 
        flush_work(&vblk->config_work);
 
+       refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
        put_disk(vblk->disk);
        mempool_destroy(vblk->pool);
        vdev->config->del_vqs(vdev);
        kfree(vblk);
-       ida_simple_remove(&vd_index_ida, index);
+
+       /* Only free device id if we don't have any users */
+       if (refc == 1)
+               ida_simple_remove(&vd_index_ida, index);
 }
 
 #ifdef CONFIG_PM
index 48bbfec..2a4a86d 100644 (file)
@@ -162,7 +162,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
 }
 
 
-UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
+static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
                                        exynos_rng_runtime_resume, NULL);
 
 static struct platform_driver exynos_rng_driver = {
index c6fa3bc..6f6e92a 100644 (file)
@@ -399,7 +399,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 {
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
-       char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+       char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;
 
        read = 0;
@@ -527,7 +527,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
-       char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+       char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;
 
        if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
 {
        unsigned long i = *ppos;
-       const char __user * tmp = buf;
+       const char __user *tmp = buf;
 
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
@@ -729,7 +729,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
        return ret;
 }
 
-static int open_port(struct inode * inode, struct file * filp)
+static int open_port(struct inode *inode, struct file *filp)
 {
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
@@ -898,7 +898,7 @@ static int __init chr_dev_init(void)
                        continue;
 
                /*
-                * Create /dev/port? 
+                * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;
index b66eaa0..29f6bec 100644 (file)
@@ -102,8 +102,7 @@ static MGSL_PARAMS default_params = {
        ASYNC_PARITY_NONE               /* unsigned char parity; */
 };
 
-typedef struct
-{
+typedef struct {
        int count;
        unsigned char status;
        char data[1];
@@ -326,10 +325,10 @@ typedef struct _mgslpc_info {
 #define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
 
 #define set_reg_bits(info, reg, mask) \
-    write_reg(info, (reg), \
+       write_reg(info, (reg), \
                 (unsigned char) (read_reg(info, (reg)) | (mask)))
 #define clear_reg_bits(info, reg, mask) \
-    write_reg(info, (reg), \
+       write_reg(info, (reg), \
                 (unsigned char) (read_reg(info, (reg)) & ~(mask)))
 /*
  * interrupt enable/disable routines
@@ -356,10 +355,10 @@ static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short
 }
 
 #define port_irq_disable(info, mask) \
-  { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+       { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
 
 #define port_irq_enable(info, mask) \
-  { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
+       { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
 
 static void rx_start(MGSLPC_INFO *info);
 static void rx_stop(MGSLPC_INFO *info);
@@ -397,7 +396,7 @@ static int adapter_test(MGSLPC_INFO *info);
 
 static int claim_resources(MGSLPC_INFO *info);
 static void release_resources(MGSLPC_INFO *info);
-static void mgslpc_add_device(MGSLPC_INFO *info);
+static int mgslpc_add_device(MGSLPC_INFO *info);
 static void mgslpc_remove_device(MGSLPC_INFO *info);
 
 static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
@@ -514,49 +513,56 @@ static const struct tty_port_operations mgslpc_port_ops = {
 
 static int mgslpc_probe(struct pcmcia_device *link)
 {
-    MGSLPC_INFO *info;
-    int ret;
-
-    if (debug_level >= DEBUG_LEVEL_INFO)
-           printk("mgslpc_attach\n");
-
-    info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
-    if (!info) {
-           printk("Error can't allocate device instance data\n");
-           return -ENOMEM;
-    }
-
-    info->magic = MGSLPC_MAGIC;
-    tty_port_init(&info->port);
-    info->port.ops = &mgslpc_port_ops;
-    INIT_WORK(&info->task, bh_handler);
-    info->max_frame_size = 4096;
-    info->port.close_delay = 5*HZ/10;
-    info->port.closing_wait = 30*HZ;
-    init_waitqueue_head(&info->status_event_wait_q);
-    init_waitqueue_head(&info->event_wait_q);
-    spin_lock_init(&info->lock);
-    spin_lock_init(&info->netlock);
-    memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
-    info->idle_mode = HDLC_TXIDLE_FLAGS;
-    info->imra_value = 0xffff;
-    info->imrb_value = 0xffff;
-    info->pim_value = 0xff;
-
-    info->p_dev = link;
-    link->priv = info;
-
-    /* Initialize the struct pcmcia_device structure */
-
-    ret = mgslpc_config(link);
-    if (ret) {
-           tty_port_destroy(&info->port);
-           return ret;
-    }
-
-    mgslpc_add_device(info);
-
-    return 0;
+       MGSLPC_INFO *info;
+       int ret;
+
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("mgslpc_attach\n");
+
+       info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+       if (!info) {
+               printk("Error can't allocate device instance data\n");
+               return -ENOMEM;
+       }
+
+       info->magic = MGSLPC_MAGIC;
+       tty_port_init(&info->port);
+       info->port.ops = &mgslpc_port_ops;
+       INIT_WORK(&info->task, bh_handler);
+       info->max_frame_size = 4096;
+       info->port.close_delay = 5*HZ/10;
+       info->port.closing_wait = 30*HZ;
+       init_waitqueue_head(&info->status_event_wait_q);
+       init_waitqueue_head(&info->event_wait_q);
+       spin_lock_init(&info->lock);
+       spin_lock_init(&info->netlock);
+       memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+       info->idle_mode = HDLC_TXIDLE_FLAGS;
+       info->imra_value = 0xffff;
+       info->imrb_value = 0xffff;
+       info->pim_value = 0xff;
+
+       info->p_dev = link;
+       link->priv = info;
+
+       /* Initialize the struct pcmcia_device structure */
+
+       ret = mgslpc_config(link);
+       if (ret != 0)
+               goto failed;
+
+       ret = mgslpc_add_device(info);
+       if (ret != 0)
+               goto failed_release;
+
+       return 0;
+
+failed_release:
+       mgslpc_release((u_long)link);
+failed:
+       tty_port_destroy(&info->port);
+       kfree(info);
+       return ret;
 }
 
 /* Card has been inserted.
@@ -569,35 +575,35 @@ static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
 
 static int mgslpc_config(struct pcmcia_device *link)
 {
-    MGSLPC_INFO *info = link->priv;
-    int ret;
+       MGSLPC_INFO *info = link->priv;
+       int ret;
 
-    if (debug_level >= DEBUG_LEVEL_INFO)
-           printk("mgslpc_config(0x%p)\n", link);
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("mgslpc_config(0x%p)\n", link);
 
-    link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+       link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
 
-    ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
-    if (ret != 0)
-           goto failed;
+       ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+       if (ret != 0)
+               goto failed;
 
-    link->config_index = 8;
-    link->config_regs = PRESENT_OPTION;
+       link->config_index = 8;
+       link->config_regs = PRESENT_OPTION;
 
-    ret = pcmcia_request_irq(link, mgslpc_isr);
-    if (ret)
-           goto failed;
-    ret = pcmcia_enable_device(link);
-    if (ret)
-           goto failed;
+       ret = pcmcia_request_irq(link, mgslpc_isr);
+       if (ret)
+               goto failed;
+       ret = pcmcia_enable_device(link);
+       if (ret)
+               goto failed;
 
-    info->io_base = link->resource[0]->start;
-    info->irq_level = link->irq;
-    return 0;
+       info->io_base = link->resource[0]->start;
+       info->irq_level = link->irq;
+       return 0;
 
 failed:
-    mgslpc_release((u_long)link);
-    return -ENODEV;
+       mgslpc_release((u_long)link);
+       return -ENODEV;
 }
 
 /* Card has been removed.
@@ -703,12 +709,12 @@ static void tx_pause(struct tty_struct *tty)
        if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
                return;
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("tx_pause(%s)\n",info->device_name);
+               printk("tx_pause(%s)\n", info->device_name);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (info->tx_enabled)
-               tx_stop(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+               tx_stop(info);
+       spin_unlock_irqrestore(&info->lock, flags);
 }
 
 static void tx_release(struct tty_struct *tty)
@@ -719,12 +725,12 @@ static void tx_release(struct tty_struct *tty)
        if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
                return;
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("tx_release(%s)\n",info->device_name);
+               printk("tx_release(%s)\n", info->device_name);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (!info->tx_enabled)
-               tx_start(info, tty);
-       spin_unlock_irqrestore(&info->lock,flags);
+               tx_start(info, tty);
+       spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /* Return next bottom half action to perform.
@@ -735,7 +741,7 @@ static int bh_action(MGSLPC_INFO *info)
        unsigned long flags;
        int rc = 0;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
 
        if (info->pending_bh & BH_RECEIVE) {
                info->pending_bh &= ~BH_RECEIVE;
@@ -754,7 +760,7 @@ static int bh_action(MGSLPC_INFO *info)
                info->bh_requested = false;
        }
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        return rc;
 }
@@ -769,7 +775,7 @@ static void bh_handler(struct work_struct *work)
                return;
 
        if (debug_level >= DEBUG_LEVEL_BH)
-               printk( "%s(%d):bh_handler(%s) entry\n",
+               printk("%s(%d):bh_handler(%s) entry\n",
                        __FILE__,__LINE__,info->device_name);
 
        info->bh_running = true;
@@ -778,8 +784,8 @@ static void bh_handler(struct work_struct *work)
        while((action = bh_action(info)) != 0) {
 
                /* Process work item */
-               if ( debug_level >= DEBUG_LEVEL_BH )
-                       printk( "%s(%d):bh_handler() work item action=%d\n",
+               if (debug_level >= DEBUG_LEVEL_BH)
+                       printk("%s(%d):bh_handler() work item action=%d\n",
                                __FILE__,__LINE__,action);
 
                switch (action) {
@@ -802,7 +808,7 @@ static void bh_handler(struct work_struct *work)
 
        tty_kref_put(tty);
        if (debug_level >= DEBUG_LEVEL_BH)
-               printk( "%s(%d):bh_handler(%s) exit\n",
+               printk("%s(%d):bh_handler(%s) exit\n",
                        __FILE__,__LINE__,info->device_name);
 }
 
@@ -831,7 +837,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
        RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
 
        if (debug_level >= DEBUG_LEVEL_ISR)
-               printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);
+               printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
 
        if (!info->rx_enabled)
                return;
@@ -847,7 +853,8 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
 
        if (eom) {
                /* end of frame, get FIFO count from RBCL register */
-               if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
+               fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+               if (fifo_count == 0)
                        fifo_count = 32;
        } else
                fifo_count = 32;
@@ -891,13 +898,13 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
        unsigned char data, status, flag;
        int fifo_count;
        int work = 0;
-       struct mgsl_icount *icount = &info->icount;
+       struct mgsl_icount *icount = &info->icount;
 
        if (!tty) {
                /* tty is not available anymore */
                issue_command(info, CHA, CMD_RXRESET);
                if (debug_level >= DEBUG_LEVEL_ISR)
-                       printk("%s(%d):rx_ready_async(tty=NULL)\n",__FILE__,__LINE__);
+                       printk("%s(%d):rx_ready_async(tty=NULL)\n", __FILE__, __LINE__);
                return;
        }
 
@@ -1004,7 +1011,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
        int c;
 
        if (debug_level >= DEBUG_LEVEL_ISR)
-               printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name);
+               printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
 
        if (info->params.mode == MGSL_MODE_HDLC) {
                if (!info->tx_active)
@@ -1249,7 +1256,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
         */
 
        if (info->pending_bh && !info->bh_running && !info->bh_requested) {
-               if ( debug_level >= DEBUG_LEVEL_ISR )
+               if (debug_level >= DEBUG_LEVEL_ISR)
                        printk("%s(%d):%s queueing bh task.\n",
                                __FILE__,__LINE__,info->device_name);
                schedule_work(&info->task);
@@ -1273,7 +1280,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
        int retval = 0;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);
+               printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
 
        if (info->port.flags & ASYNC_INITIALIZED)
                return 0;
@@ -1283,7 +1290,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
                info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
                if (!info->tx_buf) {
                        printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
-                               __FILE__,__LINE__,info->device_name);
+                               __FILE__, __LINE__, info->device_name);
                        return -ENOMEM;
                }
        }
@@ -1298,15 +1305,15 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
        retval = claim_resources(info);
 
        /* perform existence check and diagnostics */
-       if ( !retval )
+       if (!retval)
                retval = adapter_test(info);
 
-       if ( retval ) {
-               if (capable(CAP_SYS_ADMIN) && tty)
+       if (retval) {
+               if (capable(CAP_SYS_ADMIN) && tty)
                        set_bit(TTY_IO_ERROR, &tty->flags);
                release_resources(info);
-               return retval;
-       }
+               return retval;
+       }
 
        /* program hardware for current parameters */
        mgslpc_change_params(info, tty);
@@ -1330,7 +1337,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_shutdown(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        /* clear status wait queue because status changes */
        /* can't happen after shutting down the hardware */
@@ -1344,7 +1351,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
                info->tx_buf = NULL;
        }
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
 
        rx_stop(info);
        tx_stop(info);
@@ -1352,12 +1359,12 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
        /* TODO:disable interrupts instead of reset to preserve signal states */
        reset_device(info);
 
-       if (!tty || tty->termios.c_cflag & HUPCL) {
-               info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+       if (!tty || tty->termios.c_cflag & HUPCL) {
+               info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
                set_signals(info);
        }
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        release_resources(info);
 
@@ -1371,7 +1378,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
 
        rx_stop(info);
        tx_stop(info);
@@ -1396,7 +1403,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
        if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
                rx_start(info);
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /* Reconfigure adapter based on new parameters
@@ -1411,13 +1418,13 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_change_params(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        cflag = tty->termios.c_cflag;
 
        /* if B0 rate (hangup) specified then negate DTR and RTS */
        /* otherwise assert DTR and RTS */
-       if (cflag & CBAUD)
+       if (cflag & CBAUD)
                info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
        else
                info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
@@ -1463,7 +1470,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
                info->params.data_rate = tty_get_baud_rate(tty);
        }
 
-       if ( info->params.data_rate ) {
+       if (info->params.data_rate) {
                info->timeout = (32*HZ*bits_per_char) /
                                info->params.data_rate;
        }
@@ -1498,8 +1505,8 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO) {
-               printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
-                       __FILE__,__LINE__,ch,info->device_name);
+               printk("%s(%d):mgslpc_put_char(%d) on %s\n",
+                       __FILE__, __LINE__, ch, info->device_name);
        }
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
@@ -1508,7 +1515,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
        if (!info->tx_buf)
                return 0;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
 
        if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
                if (info->tx_count < TXBUFSIZE - 1) {
@@ -1518,7 +1525,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
                }
        }
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 1;
 }
 
@@ -1531,8 +1538,8 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
-                       __FILE__,__LINE__,info->device_name,info->tx_count);
+               printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+                       __FILE__, __LINE__, info->device_name, info->tx_count);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
                return;
@@ -1542,13 +1549,13 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
                return;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
-                       __FILE__,__LINE__,info->device_name);
+               printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+                       __FILE__, __LINE__, info->device_name);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (!info->tx_active)
-               tx_start(info, tty);
-       spin_unlock_irqrestore(&info->lock,flags);
+               tx_start(info, tty);
+       spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /* Send a block of data
@@ -1569,8 +1576,8 @@ static int mgslpc_write(struct tty_struct * tty,
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk( "%s(%d):mgslpc_write(%s) count=%d\n",
-                       __FILE__,__LINE__,info->device_name,count);
+               printk("%s(%d):mgslpc_write(%s) count=%d\n",
+                       __FILE__, __LINE__, info->device_name, count);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
                !info->tx_buf)
@@ -1596,26 +1603,26 @@ static int mgslpc_write(struct tty_struct * tty,
 
                memcpy(info->tx_buf + info->tx_put, buf, c);
 
-               spin_lock_irqsave(&info->lock,flags);
+               spin_lock_irqsave(&info->lock, flags);
                info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
                info->tx_count += c;
-               spin_unlock_irqrestore(&info->lock,flags);
+               spin_unlock_irqrestore(&info->lock, flags);
 
                buf += c;
                count -= c;
                ret += c;
        }
 start:
-       if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
-               spin_lock_irqsave(&info->lock,flags);
+       if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+               spin_lock_irqsave(&info->lock, flags);
                if (!info->tx_active)
-                       tx_start(info, tty);
-               spin_unlock_irqrestore(&info->lock,flags);
-       }
+                       tx_start(info, tty);
+               spin_unlock_irqrestore(&info->lock, flags);
+       }
 cleanup:
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
-                       __FILE__,__LINE__,info->device_name,ret);
+               printk("%s(%d):mgslpc_write(%s) returning=%d\n",
+                       __FILE__, __LINE__, info->device_name, ret);
        return ret;
 }
 
@@ -1643,7 +1650,7 @@ static int mgslpc_write_room(struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_write_room(%s)=%d\n",
-                        __FILE__,__LINE__, info->device_name, ret);
+                        __FILE__, __LINE__, info->device_name, ret);
        return ret;
 }
 
@@ -1656,7 +1663,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
                return 0;
@@ -1668,7 +1675,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
-                        __FILE__,__LINE__, info->device_name, rc);
+                        __FILE__, __LINE__, info->device_name, rc);
 
        return rc;
 }
@@ -1682,15 +1689,15 @@ static void mgslpc_flush_buffer(struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
                return;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        info->tx_count = info->tx_put = info->tx_get = 0;
        del_timer(&info->tx_timer);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        wake_up_interruptible(&tty->write_wait);
        tty_wakeup(tty);
@@ -1705,17 +1712,17 @@ static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
-                        __FILE__,__LINE__, info->device_name, ch );
+                        __FILE__, __LINE__, info->device_name, ch);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
                return;
 
        info->x_char = ch;
        if (ch) {
-               spin_lock_irqsave(&info->lock,flags);
+               spin_lock_irqsave(&info->lock, flags);
                if (!info->tx_enabled)
-                       tx_start(info, tty);
-               spin_unlock_irqrestore(&info->lock,flags);
+                       tx_start(info, tty);
+               spin_unlock_irqrestore(&info->lock, flags);
        }
 }
 
@@ -1728,7 +1735,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_throttle(%s) entry\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
                return;
@@ -1736,11 +1743,11 @@ static void mgslpc_throttle(struct tty_struct * tty)
        if (I_IXOFF(tty))
                mgslpc_send_xchar(tty, STOP_CHAR(tty));
 
-       if (tty->termios.c_cflag & CRTSCTS) {
-               spin_lock_irqsave(&info->lock,flags);
+       if (tty->termios.c_cflag & CRTSCTS) {
+               spin_lock_irqsave(&info->lock, flags);
                info->serial_signals &= ~SerialSignal_RTS;
-               set_signals(info);
-               spin_unlock_irqrestore(&info->lock,flags);
+               set_signals(info);
+               spin_unlock_irqrestore(&info->lock, flags);
        }
 }
 
@@ -1753,7 +1760,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
                return;
@@ -1765,11 +1772,11 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
                        mgslpc_send_xchar(tty, START_CHAR(tty));
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
-               spin_lock_irqsave(&info->lock,flags);
+       if (tty->termios.c_cflag & CRTSCTS) {
+               spin_lock_irqsave(&info->lock, flags);
                info->serial_signals |= SerialSignal_RTS;
-               set_signals(info);
-               spin_unlock_irqrestore(&info->lock,flags);
+               set_signals(info);
+               spin_unlock_irqrestore(&info->lock, flags);
        }
 }
 
@@ -1807,33 +1814,33 @@ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
  *
  * Arguments:
  *
- *     info            pointer to device instance data
- *     new_params      user buffer containing new serial params
+ *     info            pointer to device instance data
+ *     new_params      user buffer containing new serial params
  *
  * Returns:    0 if success, otherwise error code
  */
 static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
 {
-       unsigned long flags;
+       unsigned long flags;
        MGSL_PARAMS tmp_params;
        int err;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
-                       info->device_name );
+                       info->device_name);
        COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
        if (err) {
-               if ( debug_level >= DEBUG_LEVEL_INFO )
-                       printk( "%s(%d):set_params(%s) user buffer copy failed\n",
-                               __FILE__,__LINE__,info->device_name);
+               if (debug_level >= DEBUG_LEVEL_INFO)
+                       printk("%s(%d):set_params(%s) user buffer copy failed\n",
+                               __FILE__, __LINE__, info->device_name);
                return -EFAULT;
        }
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
-       mgslpc_change_params(info, tty);
+       mgslpc_change_params(info, tty);
 
        return 0;
 }
@@ -1851,13 +1858,13 @@ static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
 
 static int set_txidle(MGSLPC_INFO * info, int idle_mode)
 {
-       unsigned long flags;
+       unsigned long flags;
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        info->idle_mode = idle_mode;
        tx_set_idle(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 0;
 }
 
@@ -1874,11 +1881,11 @@ static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
 
 static int set_interface(MGSLPC_INFO * info, int if_mode)
 {
-       unsigned long flags;
+       unsigned long flags;
        unsigned char val;
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("set_interface(%s,%d)\n", info->device_name, if_mode);
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        info->if_mode = if_mode;
 
        val = read_reg(info, PVR) & 0x0f;
@@ -1890,18 +1897,18 @@ static int set_interface(MGSLPC_INFO * info, int if_mode)
        }
        write_reg(info, PVR, val);
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 0;
 }
 
 static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
 {
-       unsigned long flags;
+       unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("set_txenable(%s,%d)\n", info->device_name, enable);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (enable) {
                if (!info->tx_enabled)
                        tx_start(info, tty);
@@ -1909,18 +1916,18 @@ static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
                if (info->tx_enabled)
                        tx_stop(info);
        }
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 0;
 }
 
 static int tx_abort(MGSLPC_INFO * info)
 {
-       unsigned long flags;
+       unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("tx_abort(%s)\n", info->device_name);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (info->tx_active && info->tx_count &&
            info->params.mode == MGSL_MODE_HDLC) {
                /* clear data count so FIFO is not filled on next IRQ.
@@ -1929,18 +1936,18 @@ static int tx_abort(MGSLPC_INFO * info)
                info->tx_count = info->tx_put = info->tx_get = 0;
                info->tx_aborting = true;
        }
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 0;
 }
 
 static int set_rxenable(MGSLPC_INFO * info, int enable)
 {
-       unsigned long flags;
+       unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("set_rxenable(%s,%d)\n", info->device_name, enable);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (enable) {
                if (!info->rx_enabled)
                        rx_start(info);
@@ -1948,21 +1955,21 @@ static int set_rxenable(MGSLPC_INFO * info, int enable)
                if (info->rx_enabled)
                        rx_stop(info);
        }
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 0;
 }
 
 /* wait for specified event to occur
  *
- * Arguments:          info    pointer to device instance data
- *                     mask    pointer to bitmask of events to wait for
- * Return Value:       0       if successful and bit mask updated with
+ * Arguments:          info    pointer to device instance data
+ *                     mask    pointer to bitmask of events to wait for
+ * Return Value:       0       if successful and bit mask updated with
  *                             of events triggerred,
- *                     otherwise error code
+ *                     otherwise error code
  */
 static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
 {
-       unsigned long flags;
+       unsigned long flags;
        int s;
        int rc=0;
        struct mgsl_icount cprev, cnow;
@@ -1978,18 +1985,18 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("wait_events(%s,%d)\n", info->device_name, mask);
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
 
        /* return immediately if state matches requested events */
        get_signals(info);
        s = info->serial_signals;
        events = mask &
                ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
-                 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+                 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
                  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
                  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
        if (events) {
-               spin_unlock_irqrestore(&info->lock,flags);
+               spin_unlock_irqrestore(&info->lock, flags);
                goto exit;
        }
 
@@ -2004,7 +2011,7 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&info->event_wait_q, &wait);
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
 
        for(;;) {
@@ -2015,11 +2022,11 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
                }
 
                /* get current irq counts */
-               spin_lock_irqsave(&info->lock,flags);
+               spin_lock_irqsave(&info->lock, flags);
                cnow = info->icount;
                newsigs = info->input_signal_events;
                set_current_state(TASK_INTERRUPTIBLE);
-               spin_unlock_irqrestore(&info->lock,flags);
+               spin_unlock_irqrestore(&info->lock, flags);
 
                /* if no change, wait aborted for some reason */
                if (newsigs.dsr_up   == oldsigs.dsr_up   &&
@@ -2058,10 +2065,10 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
        set_current_state(TASK_RUNNING);
 
        if (mask & MgslEvent_ExitHuntMode) {
-               spin_lock_irqsave(&info->lock,flags);
+               spin_lock_irqsave(&info->lock, flags);
                if (!waitqueue_active(&info->event_wait_q))
                        irq_disable(info, CHA, IRQ_EXITHUNT);
-               spin_unlock_irqrestore(&info->lock,flags);
+               spin_unlock_irqrestore(&info->lock, flags);
        }
 exit:
        if (rc == 0)
@@ -2071,17 +2078,17 @@ exit:
 
 static int modem_input_wait(MGSLPC_INFO *info,int arg)
 {
-       unsigned long flags;
+       unsigned long flags;
        int rc;
        struct mgsl_icount cprev, cnow;
        DECLARE_WAITQUEUE(wait, current);
 
        /* save current irq counts */
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        cprev = info->icount;
        add_wait_queue(&info->status_event_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        for(;;) {
                schedule();
@@ -2091,10 +2098,10 @@ static int modem_input_wait(MGSLPC_INFO *info,int arg)
                }
 
                /* get new irq counts */
-               spin_lock_irqsave(&info->lock,flags);
+               spin_lock_irqsave(&info->lock, flags);
                cnow = info->icount;
                set_current_state(TASK_INTERRUPTIBLE);
-               spin_unlock_irqrestore(&info->lock,flags);
+               spin_unlock_irqrestore(&info->lock, flags);
 
                /* if no change, wait aborted for some reason */
                if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
@@ -2125,11 +2132,11 @@ static int tiocmget(struct tty_struct *tty)
 {
        MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
        unsigned int result;
-       unsigned long flags;
+       unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
+       get_signals(info);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
                ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
@@ -2140,7 +2147,7 @@ static int tiocmget(struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s tiocmget() value=%08X\n",
-                        __FILE__,__LINE__, info->device_name, result );
+                        __FILE__, __LINE__, info->device_name, result);
        return result;
 }
 
@@ -2150,11 +2157,11 @@ static int tiocmset(struct tty_struct *tty,
                    unsigned int set, unsigned int clear)
 {
        MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
-       unsigned long flags;
+       unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s tiocmset(%x,%x)\n",
-                       __FILE__,__LINE__,info->device_name, set, clear);
+                       __FILE__, __LINE__, info->device_name, set, clear);
 
        if (set & TIOCM_RTS)
                info->serial_signals |= SerialSignal_RTS;
@@ -2165,9 +2172,9 @@ static int tiocmset(struct tty_struct *tty,
        if (clear & TIOCM_DTR)
                info->serial_signals &= ~SerialSignal_DTR;
 
-       spin_lock_irqsave(&info->lock,flags);
-       set_signals(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
+       set_signals(info);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        return 0;
 }
@@ -2184,17 +2191,17 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_break(%s,%d)\n",
-                        __FILE__,__LINE__, info->device_name, break_state);
+                        __FILE__, __LINE__, info->device_name, break_state);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
                return -EINVAL;
 
-       spin_lock_irqsave(&info->lock,flags);
-       if (break_state == -1)
+       spin_lock_irqsave(&info->lock, flags);
+       if (break_state == -1)
                set_reg_bits(info, CHA+DAFO, BIT6);
        else
                clear_reg_bits(info, CHA+DAFO, BIT6);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return 0;
 }
 
@@ -2205,9 +2212,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
        struct mgsl_icount cnow;        /* kernel counter temps */
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        cnow = info->icount;
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        icount->cts = cnow.cts;
        icount->dsr = cnow.dsr;
@@ -2228,9 +2235,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
  *
  * Arguments:
  *
- *     tty     pointer to tty instance data
- *     cmd     IOCTL command code
- *     arg     command argument/context
+ *     tty     pointer to tty instance data
+ *     cmd     IOCTL command code
+ *     arg     command argument/context
  *
  * Return Value:       0 if success, otherwise error code
  */
@@ -2241,8 +2248,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
        void __user *argp = (void __user *)arg;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
-                       info->device_name, cmd );
+               printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
+                       info->device_name, cmd);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
                return -ENODEV;
@@ -2288,8 +2295,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
  *
  * Arguments:
  *
- *     tty             pointer to tty structure
- *     termios         pointer to buffer to hold returned old termios
+ *     tty             pointer to tty structure
+ *     termios         pointer to buffer to hold returned old termios
  */
 static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
@@ -2297,8 +2304,8 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__,
-                       tty->driver->name );
+               printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
+                       tty->driver->name);
 
        /* just return if nothing has changed */
        if ((tty->termios.c_cflag == old_termios->c_cflag)
@@ -2312,22 +2319,22 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
        if (old_termios->c_cflag & CBAUD &&
            !(tty->termios.c_cflag & CBAUD)) {
                info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
-               spin_lock_irqsave(&info->lock,flags);
-               set_signals(info);
-               spin_unlock_irqrestore(&info->lock,flags);
+               spin_lock_irqsave(&info->lock, flags);
+               set_signals(info);
+               spin_unlock_irqrestore(&info->lock, flags);
        }
 
        /* Handle transition away from B0 status */
        if (!(old_termios->c_cflag & CBAUD) &&
            tty->termios.c_cflag & CBAUD) {
                info->serial_signals |= SerialSignal_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) ||
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!(tty->termios.c_cflag & CRTSCTS) ||
+                   !test_bit(TTY_THROTTLED, &tty->flags)) {
                        info->serial_signals |= SerialSignal_RTS;
-               }
-               spin_lock_irqsave(&info->lock,flags);
-               set_signals(info);
-               spin_unlock_irqrestore(&info->lock,flags);
+               }
+               spin_lock_irqsave(&info->lock, flags);
+               set_signals(info);
+               spin_unlock_irqrestore(&info->lock, flags);
        }
 
        /* Handle turning off CRTSCTS */
@@ -2348,15 +2355,15 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
-                        __FILE__,__LINE__, info->device_name, port->count);
+                        __FILE__, __LINE__, info->device_name, port->count);
 
        WARN_ON(!port->count);
 
        if (tty_port_close_start(port, tty, filp) == 0)
                goto cleanup;
 
-       if (port->flags & ASYNC_INITIALIZED)
-               mgslpc_wait_until_sent(tty, info->timeout);
+       if (port->flags & ASYNC_INITIALIZED)
+               mgslpc_wait_until_sent(tty, info->timeout);
 
        mgslpc_flush_buffer(tty);
 
@@ -2367,7 +2374,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
        tty_port_tty_set(port, NULL);
 cleanup:
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+               printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
                        tty->driver->name, port->count);
 }
 
@@ -2378,12 +2385,12 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
        MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
        unsigned long orig_jiffies, char_time;
 
-       if (!info )
+       if (!info)
                return;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
                return;
@@ -2399,8 +2406,8 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
         * Note: use tight timings here to satisfy the NIST-PCTS.
         */
 
-       if ( info->params.data_rate ) {
-               char_time = info->timeout/(32 * 5);
+       if (info->params.data_rate) {
+               char_time = info->timeout/(32 * 5);
                if (!char_time)
                        char_time++;
        } else
@@ -2431,7 +2438,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
 exit:
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 }
 
 /* Called by tty_hangup() when a hangup is signaled.
@@ -2443,7 +2450,7 @@ static void mgslpc_hangup(struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_hangup(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
                return;
@@ -2458,9 +2465,9 @@ static int carrier_raised(struct tty_port *port)
        MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
+       get_signals(info);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        if (info->serial_signals & SerialSignal_DCD)
                return 1;
@@ -2472,13 +2479,13 @@ static void dtr_rts(struct tty_port *port, int onoff)
        MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (onoff)
                info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
        else
                info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR;
        set_signals(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 }
 
 
@@ -2486,14 +2493,14 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
 {
        MGSLPC_INFO     *info;
        struct tty_port *port;
-       int                     retval, line;
-       unsigned long flags;
+       int             retval, line;
+       unsigned long   flags;
 
        /* verify range of specified line number */
        line = tty->index;
        if (line >= mgslpc_device_count) {
                printk("%s(%d):mgslpc_open with invalid line #%d.\n",
-                       __FILE__,__LINE__,line);
+                       __FILE__, __LINE__, line);
                return -ENODEV;
        }
 
@@ -2510,7 +2517,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
-                        __FILE__,__LINE__,tty->driver->name, port->count);
+                        __FILE__, __LINE__, tty->driver->name, port->count);
 
        /* If port is closing, signal caller to try again */
        if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2545,13 +2552,13 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
        if (retval) {
                if (debug_level >= DEBUG_LEVEL_INFO)
                        printk("%s(%d):block_til_ready(%s) returned %d\n",
-                                __FILE__,__LINE__, info->device_name, retval);
+                                __FILE__, __LINE__, info->device_name, retval);
                goto cleanup;
        }
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_open(%s) success\n",
-                        __FILE__,__LINE__, info->device_name);
+                        __FILE__, __LINE__, info->device_name);
        retval = 0;
 
 cleanup:
@@ -2571,9 +2578,9 @@ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
                      info->device_name, info->io_base, info->irq_level);
 
        /* output current serial signal states */
-       spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
+       get_signals(info);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        stat_buf[0] = 0;
        stat_buf[1] = 0;
@@ -2635,7 +2642,7 @@ static int mgslpc_proc_show(struct seq_file *m, void *v)
        seq_printf(m, "synclink driver:%s\n", driver_version);
 
        info = mgslpc_device_list;
-       while( info ) {
+       while (info) {
                line_info(m, info);
                info = info->next_device;
        }
@@ -2686,8 +2693,8 @@ static void rx_free_buffers(MGSLPC_INFO *info)
 
 static int claim_resources(MGSLPC_INFO *info)
 {
-       if (rx_alloc_buffers(info) < 0 ) {
-               printk( "Can't allocate rx buffer %s\n", info->device_name);
+       if (rx_alloc_buffers(info) < 0) {
+               printk("Can't allocate rx buffer %s\n", info->device_name);
                release_resources(info);
                return -ENODEV;
        }
@@ -2706,8 +2713,12 @@ static void release_resources(MGSLPC_INFO *info)
  *
  * Arguments:          info    pointer to device instance data
  */
-static void mgslpc_add_device(MGSLPC_INFO *info)
+static int mgslpc_add_device(MGSLPC_INFO *info)
 {
+       MGSLPC_INFO *current_dev = NULL;
+       struct device *tty_dev;
+       int ret;
+
        info->next_device = NULL;
        info->line = mgslpc_device_count;
        sprintf(info->device_name,"ttySLP%d",info->line);
@@ -2722,8 +2733,8 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
        if (!mgslpc_device_list)
                mgslpc_device_list = info;
        else {
-               MGSLPC_INFO *current_dev = mgslpc_device_list;
-               while( current_dev->next_device )
+               current_dev = mgslpc_device_list;
+               while (current_dev->next_device)
                        current_dev = current_dev->next_device;
                current_dev->next_device = info;
        }
@@ -2733,14 +2744,34 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
        else if (info->max_frame_size > 65535)
                info->max_frame_size = 65535;
 
-       printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+       printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
                info->device_name, info->io_base, info->irq_level);
 
 #if SYNCLINK_GENERIC_HDLC
-       hdlcdev_init(info);
+       ret = hdlcdev_init(info);
+       if (ret != 0)
+               goto failed;
 #endif
-       tty_port_register_device(&info->port, serial_driver, info->line,
+
+       tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
                        &info->p_dev->dev);
+       if (IS_ERR(tty_dev)) {
+               ret = PTR_ERR(tty_dev);
+#if SYNCLINK_GENERIC_HDLC
+               hdlcdev_exit(info);
+#endif
+               goto failed;
+       }
+
+       return 0;
+
+failed:
+       if (current_dev)
+               current_dev->next_device = NULL;
+       else
+               mgslpc_device_list = NULL;
+       mgslpc_device_count--;
+       return ret;
 }
 
 static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
@@ -3262,7 +3293,7 @@ static void rx_stop(MGSLPC_INFO *info)
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):rx_stop(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        /* MODE:03 RAC Receiver Active, 0=inactive */
        clear_reg_bits(info, CHA + MODE, BIT3);
@@ -3275,7 +3306,7 @@ static void rx_start(MGSLPC_INFO *info)
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):rx_start(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        rx_reset_buffers(info);
        info->rx_enabled = false;
@@ -3291,7 +3322,7 @@ static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):tx_start(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        if (info->tx_count) {
                /* If auto RTS enabled and RTS is inactive, then assert */
@@ -3329,7 +3360,7 @@ static void tx_stop(MGSLPC_INFO *info)
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):tx_stop(%s)\n",
-                        __FILE__,__LINE__, info->device_name );
+                        __FILE__, __LINE__, info->device_name);
 
        del_timer(&info->tx_timer);
 
@@ -3681,7 +3712,7 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
 
        if (debug_level >= DEBUG_LEVEL_BH)
                printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
-                       __FILE__,__LINE__,info->device_name,status,framesize);
+                       __FILE__, __LINE__, info->device_name, status, framesize);
 
        if (debug_level >= DEBUG_LEVEL_DATA)
                trace_block(info, buf->data, framesize, 0);
@@ -3709,13 +3740,13 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
                }
        }
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        buf->status = buf->count = 0;
        info->rx_frame_count--;
        info->rx_get++;
        if (info->rx_get >= info->rx_buf_count)
                info->rx_get = 0;
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        return true;
 }
@@ -3729,7 +3760,7 @@ static bool register_test(MGSLPC_INFO *info)
        bool rc = true;
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        reset_device(info);
 
        for (i = 0; i < count; i++) {
@@ -3742,7 +3773,7 @@ static bool register_test(MGSLPC_INFO *info)
                }
        }
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
        return rc;
 }
 
@@ -3751,7 +3782,7 @@ static bool irq_test(MGSLPC_INFO *info)
        unsigned long end_time;
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        reset_device(info);
 
        info->testing_irq = true;
@@ -3765,7 +3796,7 @@ static bool irq_test(MGSLPC_INFO *info)
        write_reg(info, CHA + TIMR, 0); /* 512 cycles */
        issue_command(info, CHA, CMD_START_TIMER);
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        end_time=100;
        while(end_time-- && !info->irq_occurred) {
@@ -3774,9 +3805,9 @@ static bool irq_test(MGSLPC_INFO *info)
 
        info->testing_irq = false;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        reset_device(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        return info->irq_occurred;
 }
@@ -3785,21 +3816,21 @@ static int adapter_test(MGSLPC_INFO *info)
 {
        if (!register_test(info)) {
                info->init_error = DiagStatus_AddressFailure;
-               printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
-                       __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+               printk("%s(%d):Register test failure for device %s Addr=%04X\n",
+                       __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
                return -ENODEV;
        }
 
        if (!irq_test(info)) {
                info->init_error = DiagStatus_IrqFailure;
-               printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
-                       __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+               printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+                       __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
                return -ENODEV;
        }
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):device %s passed diagnostics\n",
-                       __FILE__,__LINE__,info->device_name);
+                       __FILE__, __LINE__, info->device_name);
        return 0;
 }
 
@@ -3808,9 +3839,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
        int i;
        int linecount;
        if (xmit)
-               printk("%s tx data:\n",info->device_name);
+               printk("%s tx data:\n", info->device_name);
        else
-               printk("%s rx data:\n",info->device_name);
+               printk("%s rx data:\n", info->device_name);
 
        while(count) {
                if (count > 16)
@@ -3819,12 +3850,12 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
                        linecount = count;
 
                for(i=0;i<linecount;i++)
-                       printk("%02X ",(unsigned char)data[i]);
+                       printk("%02X ", (unsigned char)data[i]);
                for(;i<17;i++)
                        printk("   ");
                for(i=0;i<linecount;i++) {
                        if (data[i]>=040 && data[i]<=0176)
-                               printk("%c",data[i]);
+                               printk("%c", data[i]);
                        else
                                printk(".");
                }
@@ -3843,18 +3874,18 @@ static void tx_timeout(unsigned long context)
        MGSLPC_INFO *info = (MGSLPC_INFO*)context;
        unsigned long flags;
 
-       if ( debug_level >= DEBUG_LEVEL_INFO )
-               printk( "%s(%d):tx_timeout(%s)\n",
-                       __FILE__,__LINE__,info->device_name);
-       if(info->tx_active &&
-          info->params.mode == MGSL_MODE_HDLC) {
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):tx_timeout(%s)\n",
+                       __FILE__, __LINE__, info->device_name);
+       if (info->tx_active &&
+           info->params.mode == MGSL_MODE_HDLC) {
                info->icount.txtimeout++;
        }
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        info->tx_active = false;
        info->tx_count = info->tx_put = info->tx_get = 0;
 
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
 #if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
@@ -3936,7 +3967,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+               printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
 
        /* stop sending until this frame completes */
        netif_stop_queue(dev);
@@ -3957,13 +3988,13 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
        dev->trans_start = jiffies;
 
        /* start hardware transmitter if necessary */
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        if (!info->tx_active) {
                struct tty_struct *tty = tty_port_tty_get(&info->port);
-               tx_start(info, tty);
-               tty_kref_put(tty);
+               tx_start(info, tty);
+               tty_kref_put(tty);
        }
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        return NETDEV_TX_OK;
 }
@@ -3984,10 +4015,11 @@ static int hdlcdev_open(struct net_device *dev)
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+               printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
 
        /* generic HDLC layer open processing */
-       if ((rc = hdlc_open(dev)))
+       rc = hdlc_open(dev);
+       if (rc != 0)
                return rc;
 
        /* arbitrate between network and tty opens */
@@ -4002,7 +4034,8 @@ static int hdlcdev_open(struct net_device *dev)
 
        tty = tty_port_tty_get(&info->port);
        /* claim resources and init adapter */
-       if ((rc = startup(info, tty)) != 0) {
+       rc = startup(info, tty);
+       if (rc != 0) {
                tty_kref_put(tty);
                spin_lock_irqsave(&info->netlock, flags);
                info->netcount=0;
@@ -4044,7 +4077,7 @@ static int hdlcdev_close(struct net_device *dev)
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+               printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
 
        netif_stop_queue(dev);
 
@@ -4078,7 +4111,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        unsigned int flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+               printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
 
        /* return error if TTY interface open */
        if (info->port.count)
@@ -4179,14 +4212,14 @@ static void hdlcdev_tx_timeout(struct net_device *dev)
        unsigned long flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+               printk("hdlcdev_tx_timeout(%s)\n", dev->name);
 
        dev->stats.tx_errors++;
        dev->stats.tx_aborted_errors++;
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->lock, flags);
        tx_stop(info);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->lock, flags);
 
        netif_wake_queue(dev);
 }
@@ -4217,7 +4250,7 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
        struct net_device *dev = info->netdev;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
-               printk("hdlcdev_rx(%s)\n",dev->name);
+               printk("hdlcdev_rx(%s)\n", dev->name);
 
        if (skb == NULL) {
                printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
@@ -4260,8 +4293,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
 
        /* allocate and initialize network and HDLC layer objects */
 
-       if (!(dev = alloc_hdlcdev(info))) {
-               printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+       dev = alloc_hdlcdev(info);
+       if (dev == NULL) {
+               printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
                return -ENOMEM;
        }
 
@@ -4280,8 +4314,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
        hdlc->xmit   = hdlcdev_xmit;
 
        /* register objects with HDLC layer */
-       if ((rc = register_hdlc_device(dev))) {
-               printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+       rc = register_hdlc_device(dev);
+       if (rc) {
+               printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
                free_netdev(dev);
                return rc;
        }
index ff00457..9dd2551 100644 (file)
@@ -124,7 +124,7 @@ void __init of_cpu_clk_setup(struct device_node *node)
 
        clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
        if (WARN_ON(!clks))
-               return;
+               goto clks_out;
 
        for_each_node_by_type(dn, "cpu") {
                struct clk_init_data init;
@@ -134,11 +134,11 @@ void __init of_cpu_clk_setup(struct device_node *node)
                int cpu, err;
 
                if (WARN_ON(!clk_name))
-                       return;
+                       goto bail_out;
 
                err = of_property_read_u32(dn, "reg", &cpu);
                if (WARN_ON(err))
-                       return;
+                       goto bail_out;
 
                sprintf(clk_name, "cpu%d", cpu);
                parent_clk = of_clk_get(node, 0);
@@ -167,6 +167,9 @@ void __init of_cpu_clk_setup(struct device_node *node)
        return;
 bail_out:
        kfree(clks);
+       while(ncpus--)
+               kfree(cpuclk[ncpus].clk_name);
+clks_out:
        kfree(cpuclk);
 }
 
index 934854a..7227cd7 100644 (file)
@@ -106,7 +106,7 @@ config X86_POWERNOW_K7_ACPI
 config X86_POWERNOW_K8
        tristate "AMD Opteron/Athlon64 PowerNow!"
        select CPU_FREQ_TABLE
-       depends on ACPI && ACPI_PROCESSOR
+       depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
        help
          This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
          Support for K10 and newer processors is now in acpi-cpufreq.
index 0d048f6..7b0d49d 100644 (file)
@@ -1030,4 +1030,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
 
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+       X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+       X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
 MODULE_ALIAS("acpi");
index 52bf36d..debc5a7 100644 (file)
@@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
        }
 
        if (cpu_reg) {
+               rcu_read_lock();
                opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
                if (IS_ERR(opp)) {
+                       rcu_read_unlock();
                        pr_err("failed to find OPP for %ld\n", freq_Hz);
                        return PTR_ERR(opp);
                }
                volt = opp_get_voltage(opp);
+               rcu_read_unlock();
                tol = volt * voltage_tolerance / 100;
                volt_old = regulator_get_voltage(cpu_reg);
        }
@@ -236,12 +239,14 @@ static int cpu0_cpufreq_driver_init(void)
                 */
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
                        ;
+               rcu_read_lock();
                opp = opp_find_freq_exact(cpu_dev,
                                freq_table[0].frequency * 1000, true);
                min_uV = opp_get_voltage(opp);
                opp = opp_find_freq_exact(cpu_dev,
                                freq_table[i-1].frequency * 1000, true);
                max_uV = opp_get_voltage(opp);
+               rcu_read_unlock();
                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
                if (ret > 0)
                        transition_latency += ret * 1000;
index 1f3417a..97102b0 100644 (file)
@@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,
        freq = ret;
 
        if (mpu_reg) {
+               rcu_read_lock();
                opp = opp_find_freq_ceil(mpu_dev, &freq);
                if (IS_ERR(opp)) {
+                       rcu_read_unlock();
                        dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
                                __func__, freqs.new);
                        return -EINVAL;
                }
                volt = opp_get_voltage(opp);
+               rcu_read_unlock();
                tol = volt * OPP_TOLERANCE / 100;
                volt_old = regulator_get_voltage(mpu_reg);
        }
index fb4a7dd..e1f6860 100644 (file)
@@ -69,24 +69,15 @@ int cpuidle_play_dead(void)
 {
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-       int i, dead_state = -1;
-       int power_usage = INT_MAX;
+       int i;
 
        if (!drv)
                return -ENODEV;
 
        /* Find lowest-power state that supports long-term idle */
-       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
-               struct cpuidle_state *s = &drv->states[i];
-
-               if (s->power_usage < power_usage && s->enter_dead) {
-                       power_usage = s->power_usage;
-                       dead_state = i;
-               }
-       }
-
-       if (dead_state != -1)
-               return drv->states[dead_state].enter_dead(dev, dead_state);
+       for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+               if (drv->states[i].enter_dead)
+                       return drv->states[i].enter_dead(dev, i);
 
        return -ENODEV;
 }
index c2b281a..422c7b6 100644 (file)
@@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
 static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
 static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
 
-static void set_power_states(struct cpuidle_driver *drv)
-{
-       int i;
-
-       /*
-        * cpuidle driver should set the drv->power_specified bit
-        * before registering if the driver provides
-        * power_usage numbers.
-        *
-        * If power_specified is not set,
-        * we fill in power_usage with decreasing values as the
-        * cpuidle code has an implicit assumption that state Cn
-        * uses less power than C(n-1).
-        *
-        * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
-        * an power value of -1.  So we use -2, -3, etc, for other
-        * c-states.
-        */
-       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
-               drv->states[i].power_usage = -1 - i;
-}
-
 static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 {
        drv->refcnt = 0;
-
-       if (!drv->power_specified)
-               set_power_states(drv);
 }
 
 static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
index 20ea33a..fe343a0 100644 (file)
@@ -312,7 +312,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
-       int power_usage = INT_MAX;
        int i;
        int multiplier;
        struct timespec t;
@@ -383,11 +382,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                if (s->exit_latency * multiplier > data->predicted_us)
                        continue;
 
-               if (s->power_usage < power_usage) {
-                       power_usage = s->power_usage;
-                       data->last_state_idx = i;
-                       data->exit_us = s->exit_latency;
-               }
+               data->last_state_idx = i;
+               data->exit_us = s->exit_latency;
        }
 
        /* not deepest C-state chosen for low predicted residency */
index 3409429..428754a 100644 (file)
@@ -374,7 +374,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
 
        /* state statistics */
-       for (i = 0; i < drv->state_count; i++) {
+       for (i = 0; i < device->state_count; i++) {
                kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
                if (!kobj)
                        goto error_state;
index 53766f3..3b36797 100644 (file)
@@ -994,6 +994,11 @@ module_exit(devfreq_exit);
  * @freq:      The frequency given to target function
  * @flags:     Flags handed from devfreq framework.
  *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
                                    u32 flags)
index 80c745e..46d94e9 100644 (file)
@@ -73,6 +73,16 @@ enum busclk_level_idx {
 #define EX4210_LV_NUM  (LV_2 + 1)
 #define EX4x12_LV_NUM  (LV_4 + 1)
 
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate:      Frequency in hertz
+ * @volt:      Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+       unsigned long rate;
+       unsigned long volt;
+};
+
 struct busfreq_data {
        enum exynos4_busf_type type;
        struct device *dev;
@@ -80,7 +90,7 @@ struct busfreq_data {
        bool disabled;
        struct regulator *vdd_int;
        struct regulator *vdd_mif; /* Exynos4412/4212 only */
-       struct opp *curr_opp;
+       struct busfreq_opp_info curr_oppinfo;
        struct exynos4_ppmu dmc[2];
 
        struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
 };
 
 
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+                                struct busfreq_opp_info *oppi)
 {
        unsigned int index;
        unsigned int tmp;
 
        for (index = LV_0; index < EX4210_LV_NUM; index++)
-               if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+               if (oppi->rate == exynos4210_busclk_table[index].clk)
                        break;
 
        if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
        return 0;
 }
 
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+                                struct busfreq_opp_info *oppi)
 {
        unsigned int index;
        unsigned int tmp;
 
        for (index = LV_0; index < EX4x12_LV_NUM; index++)
-               if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+               if (oppi->rate == exynos4x12_mifclk_table[index].clk)
                        break;
 
        if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)
        return -EINVAL;
 }
 
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
-                              struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+                              struct busfreq_opp_info *oppi,
+                              struct busfreq_opp_info *oldoppi)
 {
        int err = 0, tmp;
-       unsigned long volt = opp_get_voltage(opp);
+       unsigned long volt = oppi->volt;
 
        switch (data->type) {
        case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
                if (err)
                        break;
 
-               tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+               tmp = exynos4x12_get_intspec(oppi->rate);
                if (tmp < 0) {
                        err = tmp;
                        regulator_set_voltage(data->vdd_mif,
-                                             opp_get_voltage(oldopp),
+                                             oldoppi->volt,
                                              MAX_SAFEVOLT);
                        break;
                }
@@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
                /*  Try to recover */
                if (err)
                        regulator_set_voltage(data->vdd_mif,
-                                             opp_get_voltage(oldopp),
+                                             oldoppi->volt,
                                              MAX_SAFEVOLT);
                break;
        default:
@@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data *data = platform_get_drvdata(pdev);
-       struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
-       unsigned long freq = opp_get_freq(opp);
-       unsigned long old_freq = opp_get_freq(data->curr_opp);
+       struct opp *opp;
+       unsigned long freq;
+       unsigned long old_freq = data->curr_oppinfo.rate;
+       struct busfreq_opp_info new_oppinfo;
 
-       if (IS_ERR(opp))
+       rcu_read_lock();
+       opp = devfreq_recommended_opp(dev, _freq, flags);
+       if (IS_ERR(opp)) {
+               rcu_read_unlock();
                return PTR_ERR(opp);
+       }
+       new_oppinfo.rate = opp_get_freq(opp);
+       new_oppinfo.volt = opp_get_voltage(opp);
+       rcu_read_unlock();
+       freq = new_oppinfo.rate;
 
        if (old_freq == freq)
                return 0;
 
-       dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+       dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
 
        mutex_lock(&data->lock);
 
@@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
                goto out;
 
        if (old_freq < freq)
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
        if (err)
                goto out;
 
        if (old_freq != freq) {
                switch (data->type) {
                case TYPE_BUSF_EXYNOS4210:
-                       err = exynos4210_set_busclk(data, opp);
+                       err = exynos4210_set_busclk(data, &new_oppinfo);
                        break;
                case TYPE_BUSF_EXYNOS4x12:
-                       err = exynos4x12_set_busclk(data, opp);
+                       err = exynos4x12_set_busclk(data, &new_oppinfo);
                        break;
                default:
                        err = -EINVAL;
@@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
                goto out;
 
        if (old_freq > freq)
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
        if (err)
                goto out;
 
-       data->curr_opp = opp;
+       data->curr_oppinfo = new_oppinfo;
 out:
        mutex_unlock(&data->lock);
        return err;
@@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
 
        exynos4_read_ppmu(data);
        busier_dmc = exynos4_get_busier_dmc(data);
-       stat->current_frequency = opp_get_freq(data->curr_opp);
+       stat->current_frequency = data->curr_oppinfo.rate;
 
        if (busier_dmc)
                addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
        struct busfreq_data *data = container_of(this, struct busfreq_data,
                                                 pm_notifier);
        struct opp *opp;
+       struct busfreq_opp_info new_oppinfo;
        unsigned long maxfreq = ULONG_MAX;
        int err = 0;
 
@@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
 
                data->disabled = true;
 
+               rcu_read_lock();
                opp = opp_find_freq_floor(data->dev, &maxfreq);
+               if (IS_ERR(opp)) {
+                       rcu_read_unlock();
+                       dev_err(data->dev, "%s: unable to find a min freq\n",
+                               __func__);
+                       return PTR_ERR(opp);
+               }
+               new_oppinfo.rate = opp_get_freq(opp);
+               new_oppinfo.volt = opp_get_voltage(opp);
+               rcu_read_unlock();
 
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
                if (err)
                        goto unlock;
 
                switch (data->type) {
                case TYPE_BUSF_EXYNOS4210:
-                       err = exynos4210_set_busclk(data, opp);
+                       err = exynos4210_set_busclk(data, &new_oppinfo);
                        break;
                case TYPE_BUSF_EXYNOS4x12:
-                       err = exynos4x12_set_busclk(data, opp);
+                       err = exynos4x12_set_busclk(data, &new_oppinfo);
                        break;
                default:
                        err = -EINVAL;
@@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
                if (err)
                        goto unlock;
 
-               data->curr_opp = opp;
+               data->curr_oppinfo = new_oppinfo;
 unlock:
                mutex_unlock(&data->lock);
                if (err)
@@ -1027,13 +1063,17 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
                }
        }
 
+       rcu_read_lock();
        opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
        if (IS_ERR(opp)) {
+               rcu_read_unlock();
                dev_err(dev, "Invalid initial frequency %lu kHz.\n",
                        exynos4_devfreq_profile.initial_freq);
                return PTR_ERR(opp);
        }
-       data->curr_opp = opp;
+       data->curr_oppinfo.rate = opp_get_freq(opp);
+       data->curr_oppinfo.volt = opp_get_voltage(opp);
+       rcu_read_unlock();
 
        platform_set_drvdata(pdev, data);
 
index dbf0e6f..a7dcf78 100644 (file)
@@ -684,9 +684,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                        break;
                }
 
-               imxdmac->hw_chaining = 1;
-               if (!imxdma_hw_chain(imxdmac))
-                       return -EINVAL;
+               imxdmac->hw_chaining = 0;
+
                imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
                        ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
                        CCR_REN;
index e5fc944..3e9d669 100644 (file)
@@ -951,7 +951,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                        goto free_resources;
                }
        }
-       dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
+       dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 
        /* skip validate if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
index c39e61b..3cad856 100644 (file)
@@ -266,6 +266,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
                if (async_tx_test_ack(&dma_desc->txd)) {
                        list_del(&dma_desc->node);
                        spin_unlock_irqrestore(&tdc->lock, flags);
+                       dma_desc->txd.flags = 0;
                        return dma_desc;
                }
        }
@@ -1050,7 +1051,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                                        TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
        ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
 
-       csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
+       csr |= TEGRA_APBDMA_CSR_FLOW;
+       if (flags & DMA_PREP_INTERRUPT)
+               csr |= TEGRA_APBDMA_CSR_IE_EOC;
        csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 
        apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1095,7 +1098,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                mem += len;
        }
        sg_req->last_sg = true;
-       dma_desc->txd.flags = 0;
+       if (flags & DMA_CTRL_ACK)
+               dma_desc->txd.flags = DMA_CTRL_ACK;
 
        /*
         * Make sure that mode should not be conflicting with currently
index 9377050..5168a13 100644 (file)
@@ -29,7 +29,7 @@ config EXTCON_ADC_JACK
 
 config EXTCON_MAX77693
        tristate "MAX77693 EXTCON Support"
-       depends on MFD_MAX77693
+       depends on MFD_MAX77693 && INPUT
        select IRQ_DOMAIN
        select REGMAP_I2C
        help
index 7d9bd94..6819d63 100644 (file)
@@ -547,7 +547,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
        mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
        if (! mvchip->membase) {
                dev_err(&pdev->dev, "Cannot ioremap\n");
-               kfree(mvchip->chip.label);
                return -ENOMEM;
        }
 
@@ -557,14 +556,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (! res) {
                        dev_err(&pdev->dev, "Cannot get memory resource\n");
-                       kfree(mvchip->chip.label);
                        return -ENODEV;
                }
 
                mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
                if (! mvchip->percpu_membase) {
                        dev_err(&pdev->dev, "Cannot ioremap\n");
-                       kfree(mvchip->chip.label);
                        return -ENOMEM;
                }
        }
@@ -625,7 +622,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
        mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
        if (mvchip->irqbase < 0) {
                dev_err(&pdev->dev, "no irqs\n");
-               kfree(mvchip->chip.label);
                return -ENOMEM;
        }
 
@@ -633,7 +629,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                    mvchip->membase, handle_level_irq);
        if (! gc) {
                dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
-               kfree(mvchip->chip.label);
                return -ENOMEM;
        }
 
@@ -668,7 +663,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
                                        IRQ_LEVEL | IRQ_NOPROBE);
                kfree(gc);
-               kfree(mvchip->chip.label);
                return -ENODEV;
        }
 
index 01f7fe9..76be7ee 100644 (file)
@@ -32,7 +32,6 @@
 
 #include <mach/hardware.h>
 #include <mach/map.h>
-#include <mach/regs-clock.h>
 #include <mach/regs-gpio.h>
 
 #include <plat/cpu.h>
@@ -446,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
 };
 #endif
 
-#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)
 static struct samsung_gpio_cfg exynos_gpio_cfg = {
        .set_pull       = exynos_gpio_setpull,
        .get_pull       = exynos_gpio_getpull,
@@ -2446,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_1[] = {
        {
                .chip   = {
@@ -2614,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_2[] = {
        {
                .chip   = {
@@ -2675,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_3[] = {
        {
                .chip   = {
@@ -2711,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_4[] = {
        {
                .chip   = {
@@ -3010,7 +3009,7 @@ static __init int samsung_gpiolib_init(void)
        int i, nr_chips;
        int group = 0;
 
-#ifdef CONFIG_PINCTRL_SAMSUNG
+#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
        /*
        * This gpio driver includes support for device tree support and there
        * are platforms using it. In order to maintain compatibility with those
@@ -3026,6 +3025,7 @@ static __init int samsung_gpiolib_init(void)
        static const struct of_device_id exynos_pinctrl_ids[] = {
                { .compatible = "samsung,pinctrl-exynos4210", },
                { .compatible = "samsung,pinctrl-exynos4x12", },
+               { .compatible = "samsung,pinctrl-exynos5440", },
        };
        for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
                if (pctrl_np && of_device_is_available(pctrl_np))
index 2bf9670..2aa3314 100644 (file)
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 
        BUG_ON(!hole_node->hole_follows || node->allocated);
 
-       if (mm->color_adjust)
-               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
        if (adj_start < start)
                adj_start = start;
+       if (adj_end > end)
+               adj_end = end;
+
+       if (mm->color_adjust)
+               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
        if (alignment) {
                unsigned tmp = adj_start % alignment;
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
-       mm->scan_hit_size = 0;
+       mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
 }
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
-       mm->scan_hit_size = 0;
+       mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
-       unsigned long adj_start;
-       unsigned long adj_end;
+       unsigned long adj_start, adj_end;
 
        mm->scanned_blocks++;
 
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;
 
-       hole_start = drm_mm_hole_node_start(prev_node);
-       hole_end = drm_mm_hole_node_end(prev_node);
-
-       adj_start = hole_start;
-       adj_end = hole_end;
-
-       if (mm->color_adjust)
-               mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+       adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+       adj_end = hole_end = drm_mm_hole_node_end(prev_node);
 
        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
                        adj_end = mm->scan_end;
        }
 
+       if (mm->color_adjust)
+               mm->color_adjust(prev_node, mm->scan_color,
+                                &adj_start, &adj_end);
+
        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
-               mm->scan_hit_size = hole_end;
-
+               mm->scan_hit_end = hole_end;
                return 1;
        }
 
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
                               node_list);
 
        prev_node->hole_follows = node->scanned_preceeds_hole;
-       INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);
 
-       /* Only need to check for containement because start&size for the
-        * complete resulting free block (not just the desired part) is
-        * stored. */
-       if (node->start >= mm->scan_hit_start &&
-           node->start + node->size
-                       <= mm->scan_hit_start + mm->scan_hit_size) {
-               return 1;
-       }
-
-       return 0;
+        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+                node->start < mm->scan_hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
index e6a11ca..7944d30 100644 (file)
@@ -641,6 +641,7 @@ static void i915_ring_error_state(struct seq_file *m,
        seq_printf(m, "%s command stream:\n", ring_str(ring));
        seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
        seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+       seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -693,6 +694,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "IER: 0x%08x\n", error->ier);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+       seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+       seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        seq_printf(m, "CCID: 0x%08x\n", error->ccid);
 
        for (i = 0; i < dev_priv->num_fence_regs; i++)
index ed30595..12ab3bd 100644 (file)
@@ -188,10 +188,13 @@ struct drm_i915_error_state {
        u32 pgtbl_er;
        u32 ier;
        u32 ccid;
+       u32 derrmr;
+       u32 forcewake;
        bool waiting[I915_NUM_RINGS];
        u32 pipestat[I915_MAX_PIPES];
        u32 tail[I915_NUM_RINGS];
        u32 head[I915_NUM_RINGS];
+       u32 ctl[I915_NUM_RINGS];
        u32 ipeir[I915_NUM_RINGS];
        u32 ipehr[I915_NUM_RINGS];
        u32 instdone[I915_NUM_RINGS];
index da3c82e..8febea6 100644 (file)
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+                 bool purgeable_only)
 {
        struct drm_i915_gem_object *obj, *next;
        long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
                                 gtt_list) {
-               if (i915_gem_object_is_purgeable(obj) &&
+               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
-               if (i915_gem_object_is_purgeable(obj) &&
+               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_unbind(obj) == 0 &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        return count;
 }
 
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+       return __i915_gem_shrink(dev_priv, target, true);
+}
+
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       obj->user_pin_count++;
-       obj->pin_filp = file;
-       if (obj->user_pin_count == 1) {
+       if (obj->user_pin_count == 0) {
                ret = i915_gem_object_pin(obj, args->alignment, true, false);
                if (ret)
                        goto out;
        }
 
+       obj->user_pin_count++;
+       obj->pin_filp = file;
+
        /* XXX - flush the CPU caches for pinned objects
         * as the X server doesn't manage domains yet
         */
@@ -4395,6 +4403,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        if (nr_to_scan) {
                nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
                if (nr_to_scan > 0)
+                       nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+                                                       false);
+               if (nr_to_scan > 0)
                        i915_gem_shrink_all(dev_priv);
        }
 
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
 
index d6a994a..26d08bb 100644 (file)
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
+               u64 invalid_offset = (u64)-1;
+               int j;
 
                user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
 
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                        goto err;
                }
 
+               /* As we do not update the known relocation offsets after
+                * relocating (due to the complexities in lock handling),
+                * we need to mark them as invalid now so that we force the
+                * relocation processing next time. Just in case the target
+                * object is evicted and then rebound into its old
+                * presumed_offset before the next execbuffer - if that
+                * happened we would make the mistake of assuming that the
+                * relocations were valid.
+                */
+               for (j = 0; j < exec[i].relocation_count; j++) {
+                       if (copy_to_user(&user_relocs[j].presumed_offset,
+                                        &invalid_offset,
+                                        sizeof(invalid_offset))) {
+                               ret = -EFAULT;
+                               mutex_lock(&dev->struct_mutex);
+                               goto err;
+                       }
+               }
+
                reloc_offset[i] = total;
                total += exec[i].relocation_count;
        }
index 2220dec..fe84338 100644 (file)
@@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
        error->acthd[ring->id] = intel_ring_get_active_head(ring);
        error->head[ring->id] = I915_READ_HEAD(ring);
        error->tail[ring->id] = I915_READ_TAIL(ring);
+       error->ctl[ring->id] = I915_READ_CTL(ring);
 
        error->cpu_ring_head[ring->id] = ring->head;
        error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
        else
                error->ier = I915_READ(IER);
 
+       if (INTEL_INFO(dev)->gen >= 6)
+               error->derrmr = I915_READ(DERRMR);
+
+       if (IS_VALLEYVIEW(dev))
+               error->forcewake = I915_READ(FORCEWAKE_VLV);
+       else if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ(FORCEWAKE_MT);
+       else if (INTEL_INFO(dev)->gen == 6)
+               error->forcewake = I915_READ(FORCEWAKE);
+
        for_each_pipe(pipe)
                error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
index 186ee5c..b401788 100644 (file)
 #define GEN7_ERR_INT   0x44040
 #define   ERR_INT_MMIO_UNCLAIMED (1<<13)
 
+#define DERRMR         0x44050
+
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
  * the enables for writing to the corresponding low bit.
index a9fb046..da1ad9c 100644 (file)
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
 {
        int ret;
 
-       if (obj->tiling_mode == I915_TILING_Y)
+       if (obj->tiling_mode == I915_TILING_Y) {
+               DRM_DEBUG("hardware does not support tiling Y\n");
                return -EINVAL;
+       }
 
-       if (mode_cmd->pitches[0] & 63)
+       if (mode_cmd->pitches[0] & 63) {
+               DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+                         mode_cmd->pitches[0]);
                return -EINVAL;
+       }
 
        /* FIXME <= Gen4 stride limits are bit unclear */
-       if (mode_cmd->pitches[0] > 32768)
+       if (mode_cmd->pitches[0] > 32768) {
+               DRM_DEBUG("pitch (%d) must be at less than 32768\n",
+                         mode_cmd->pitches[0]);
                return -EINVAL;
+       }
 
        if (obj->tiling_mode != I915_TILING_NONE &&
-           mode_cmd->pitches[0] != obj->stride)
+           mode_cmd->pitches[0] != obj->stride) {
+               DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+                         mode_cmd->pitches[0], obj->stride);
                return -EINVAL;
+       }
 
        /* Reject formats not supported by any plane early. */
        switch (mode_cmd->pixel_format) {
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
                break;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_ARGB1555:
-               if (INTEL_INFO(dev)->gen > 3)
+               if (INTEL_INFO(dev)->gen > 3) {
+                       DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                        return -EINVAL;
+               }
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
        case DRM_FORMAT_ARGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
-               if (INTEL_INFO(dev)->gen < 4)
+               if (INTEL_INFO(dev)->gen < 4) {
+                       DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                        return -EINVAL;
+               }
                break;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
-               if (INTEL_INFO(dev)->gen < 6)
+               if (INTEL_INFO(dev)->gen < 5) {
+                       DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                        return -EINVAL;
+               }
                break;
        default:
-               DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+               DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
                return -EINVAL;
        }
 
index 1b63d55..fb3715b 100644 (file)
@@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 
 static void
 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-                                   struct intel_dp *intel_dp)
+                                   struct intel_dp *intel_dp,
+                                   struct edp_power_seq *out)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec, final;
@@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
 #undef get_delay
 
+       DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+                     intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+                     intel_dp->panel_power_cycle_delay);
+
+       DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+                     intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+       if (out)
+               *out = final;
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+                                             struct intel_dp *intel_dp,
+                                             struct edp_power_seq *seq)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_on, pp_off, pp_div;
+
        /* And finally store the new values in the power sequencer. */
-       pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-               (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
-       pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
-                (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+       pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+               (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+                (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
                        << PP_REFERENCE_DIVIDER_SHIFT;
-       pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+       pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);
 
        /* Haswell doesn't have any port selection bits for the panel
@@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
        I915_WRITE(PCH_PP_DIVISOR, pp_div);
 
-
-       DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
-                     intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
-                     intel_dp->panel_power_cycle_delay);
-
-       DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
-                     intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(PCH_PP_ON_DELAYS),
                      I915_READ(PCH_PP_OFF_DELAYS),
@@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
+       struct edp_power_seq power_seq = { 0 };
        enum port port = intel_dig_port->port;
        const char *name = NULL;
        int type;
@@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        }
 
        if (is_edp(intel_dp))
-               intel_dp_init_panel_power_sequencer(dev, intel_dp);
+               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 
        intel_dp_i2c_init(intel_dp, intel_connector, name);
 
@@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        return;
                }
 
+               /* We now know it's not a ghost, init power sequence regs. */
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                             &power_seq);
+
                ironlake_edp_panel_vdd_on(intel_dp);
                edid = drm_get_edid(connector, &intel_dp->adapter);
                if (edid) {
index b9a660a..17aee74 100644 (file)
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
        },
        {
                .callback = intel_no_lvds_dmi_callback,
-               .ident = "ZOTAC ZBOXSD-ID12/ID13",
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
-                       DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
-               },
-       },
-       {
-               .callback = intel_no_lvds_dmi_callback,
                .ident = "Gigabyte GA-D525TUD",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
index e6f54ff..3280cff 100644 (file)
  * i915.i915_enable_fbc parameter
  */
 
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+       /* Be paranoid as we can arrive here with only partial
+        * state retrieved from the hardware during setup.
+        */
+       return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-               if (to_intel_crtc(tmp_crtc)->active &&
-                   !to_intel_crtc(tmp_crtc)->primary_disabled &&
-                   tmp_crtc->fb) {
+               if (intel_crtc_active(tmp_crtc) &&
+                   !to_intel_crtc(tmp_crtc)->primary_disabled) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
        struct drm_crtc *crtc, *enabled = NULL;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (to_intel_crtc(crtc)->active && crtc->fb) {
+               if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+       if (!intel_crtc_active(crtc)) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
        int entries;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
+       if (!intel_crtc_active(crtc))
                return false;
 
        clock = crtc->mode.clock;       /* VESA DOT Clock */
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
-       if (to_intel_crtc(crtc)->active && crtc->fb) {
+       if (intel_crtc_active(crtc)) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
-       if (to_intel_crtc(crtc)->active && crtc->fb) {
+       if (intel_crtc_active(crtc)) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+       if (!intel_crtc_active(crtc)) {
                *sprite_wm = display->guard_size;
                return false;
        }
@@ -4243,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
-       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       POSTING_READ(ECOBUS);
 }
 
 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
@@ -4260,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
        I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       POSTING_READ(ECOBUS);
 
        if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4297,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
-       /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+       /* something from same cacheline, but !FORCEWAKE */
+       POSTING_READ(ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       POSTING_READ(ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
 }
 
@@ -4344,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+       /* something from same cacheline, but !FORCEWAKE_VLV */
+       POSTING_READ(FORCEWAKE_ACK_VLV);
 }
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
@@ -4364,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* The below doubles as a POSTING_READ */
+       /* something from same cacheline, but !FORCEWAKE_VLV */
+       POSTING_READ(FORCEWAKE_ACK_VLV);
        gen6_gt_check_fifodbg(dev_priv);
 }
 
index 827dcd4..d7b060e 100644 (file)
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
-       linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+       linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset =
                intel_gen4_compute_offset_xtiled(&x, &y,
-                                                fb->bits_per_pixel / 8,
-                                                fb->pitches[0]);
+                                                pixel_size, fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
        /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-       linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+       linear_offset = y * fb->pitches[0] + x * pixel_size;
        dvssurf_offset =
                intel_gen4_compute_offset_xtiled(&x, &y,
-                                                fb->bits_per_pixel / 8,
-                                                fb->pitches[0]);
+                                                pixel_size, fb->pitches[0]);
        linear_offset -= dvssurf_offset;
 
        if (obj->tiling_mode != I915_TILING_NONE)
index c617f04..8bbb58f 100644 (file)
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
 
        ret = nouveau_handle_create(nv_object(client), ~0, ~0,
                                    nv_object(client), &client->root);
-       if (ret) {
-               nouveau_namedb_destroy(&client->base);
+       if (ret)
                return ret;
-       }
 
        /* prevent init/fini being called, os in in charge of this */
        atomic_set(&nv_object(client)->usecount, 2);
index b8d2cbf..264c2b3 100644 (file)
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
        while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
                namedb = namedb->parent;
 
-       handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;
 
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
        }
 
        hprintk(handle, TRACE, "created\n");
+
+       *phandle = handle;
+
        return 0;
 }
 
index 0f09af1..ca1a7d7 100644 (file)
@@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
        for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
                ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
 
-       if (nv_device(priv)->chipset  < 0x90 ||
-           nv_device(priv)->chipset == 0x92 ||
-           nv_device(priv)->chipset == 0xa0) {
-               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
-                       ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
-               i += 3;
-       } else {
-               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
-                       ctrl = nv_rd32(priv, 0x610798 + (i * 8));
-               i += 3;
+       if (!(ctrl & (1 << head))) {
+               if (nv_device(priv)->chipset  < 0x90 ||
+                   nv_device(priv)->chipset == 0x92 ||
+                   nv_device(priv)->chipset == 0xa0) {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                               ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+                       i += 4;
+               } else {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                               ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+                       i += 4;
+               }
        }
 
        if (!(ctrl & (1 << head)))
                return false;
+       i--;
 
        data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
        if (data) {
@@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
        for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
                ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
 
-       if (nv_device(priv)->chipset  < 0x90 ||
-           nv_device(priv)->chipset == 0x92 ||
-           nv_device(priv)->chipset == 0xa0) {
-               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
-                       ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
-               i += 3;
-       } else {
-               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
-                       ctrl = nv_rd32(priv, 0x610794 + (i * 8));
-               i += 3;
+       if (!(ctrl & (1 << head))) {
+               if (nv_device(priv)->chipset  < 0x90 ||
+                   nv_device(priv)->chipset == 0x92 ||
+                   nv_device(priv)->chipset == 0xa0) {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                               ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+                       i += 4;
+               } else {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                               ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+                       i += 4;
+               }
        }
 
        if (!(ctrl & (1 << head)))
                return 0x0000;
+       i--;
 
        data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
        if (!data)
index 0193532..63acc03 100644 (file)
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
 
 int  nouveau_client_create_(const char *name, u64 device, const char *cfg,
                            const char *dbg, int, void **);
+#define nouveau_client_destroy(p)                                              \
+       nouveau_namedb_destroy(&(p)->base)
+
 int  nouveau_client_init(struct nouveau_client *);
 int  nouveau_client_fini(struct nouveau_client *, bool suspend);
 
index c345097..b2f3d4d 100644 (file)
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
        PLL_UNK42  = 0x42,
        PLL_VPLL0  = 0x80,
        PLL_VPLL1  = 0x81,
+       PLL_VPLL2  = 0x82,
+       PLL_VPLL3  = 0x83,
        PLL_MAX    = 0xff
 };
 
index 2917d55..690ed43 100644 (file)
@@ -1534,7 +1534,6 @@ init_io(struct nvbios_init *init)
                mdelay(10);
                init_wr32(init, 0x614100, 0x10000018);
                init_wr32(init, 0x614900, 0x10000018);
-               return;
        }
 
        value = init_rdport(init, port) & mask;
index f6962c9..7c96262 100644 (file)
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
        switch (info.type) {
        case PLL_VPLL0:
        case PLL_VPLL1:
+       case PLL_VPLL2:
+       case PLL_VPLL3:
                nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
                nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
                nv_wr32(priv, info.reg + 0x10, fN << 16);
index 306bdf1..7606ed1 100644 (file)
@@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
        mem->memtype = type;
        mem->size = size;
 
-       mutex_lock(&mm->mutex);
+       mutex_lock(&pfb->base.mutex);
        do {
                if (back)
                        ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
                else
                        ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
                if (ret) {
-                       mutex_unlock(&mm->mutex);
+                       mutex_unlock(&pfb->base.mutex);
                        pfb->ram.put(pfb, &mem);
                        return ret;
                }
@@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                list_add_tail(&r->rl_entry, &mem->regions);
                size -= r->length;
        } while (size);
-       mutex_unlock(&mm->mutex);
+       mutex_unlock(&pfb->base.mutex);
 
        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
        mem->offset = (u64)r->offset << 12;
index 1188227..6565f3d 100644 (file)
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
+       mutex_lock(&imem->base.mutex);
        list_add(&iobj->head, &imem->list);
+       mutex_unlock(&imem->base.mutex);
        return 0;
 }
 
 void
 nouveau_instobj_destroy(struct nouveau_instobj *iobj)
 {
-       if (iobj->head.prev)
-               list_del(&iobj->head);
+       struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+       mutex_lock(&subdev->mutex);
+       list_del(&iobj->head);
+       mutex_unlock(&subdev->mutex);
+
        return nouveau_object_destroy(&iobj->base);
 }
 
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
        if (ret)
                return ret;
 
+       mutex_lock(&imem->base.mutex);
+
        list_for_each_entry(iobj, &imem->list, head) {
                if (iobj->suspend) {
                        for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
                }
        }
 
+       mutex_unlock(&imem->base.mutex);
+
        return 0;
 }
 
@@ -104,17 +114,26 @@ int
 nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
 {
        struct nouveau_instobj *iobj;
-       int i;
+       int i, ret = 0;
 
        if (suspend) {
+               mutex_lock(&imem->base.mutex);
+
                list_for_each_entry(iobj, &imem->list, head) {
                        iobj->suspend = vmalloc(iobj->size);
-                       if (iobj->suspend) {
-                               for (i = 0; i < iobj->size; i += 4)
-                                       iobj->suspend[i / 4] = nv_ro32(iobj, i);
-                       } else
-                               return -ENOMEM;
+                       if (!iobj->suspend) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+
+                       for (i = 0; i < iobj->size; i += 4)
+                               iobj->suspend[i / 4] = nv_ro32(iobj, i);
                }
+
+               mutex_unlock(&imem->base.mutex);
+
+               if (ret)
+                       return ret;
        }
 
        return nouveau_subdev_fini(&imem->base, suspend);
index 082c11b..77c67fc 100644 (file)
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
        u64 mm_length = (offset + length) - mm_offset;
        int ret;
 
-       vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
+       vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;
 
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
                return ret;
        }
 
+       *pvm = vm;
+
        return 0;
 }
 
index ac340ba..e620ba8 100644 (file)
@@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
                             struct nouveau_encoder **pnv_encoder)
 {
        struct drm_device *dev = connector->dev;
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
        struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
-       int i;
+       struct nouveau_i2c_port *port = NULL;
+       int i, panel = -ENODEV;
+
+       /* eDP panels need powering on by us (if the VBIOS doesn't default it
+        * to on) before doing any AUX channel transactions.  LVDS panel power
+        * is handled by the SOR itself, and not required for LVDS DDC.
+        */
+       if (nv_connector->type == DCB_CONNECTOR_eDP) {
+               panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+               if (panel == 0) {
+                       gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+                       msleep(300);
+               }
+       }
 
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct nouveau_i2c_port *port = NULL;
                struct nouveau_encoder *nv_encoder;
                struct drm_mode_object *obj;
                int id;
@@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
                        port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
                if (port && nv_probe_i2c(port, 0x50)) {
                        *pnv_encoder = nv_encoder;
-                       return port;
+                       break;
                }
+
+               port = NULL;
        }
 
-       return NULL;
+       /* eDP panel not detected, restore panel power GPIO to previous
+        * state to avoid confusing the SOR for other output types.
+        */
+       if (!port && panel == 0)
+               gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+       return port;
 }
 
 static struct nouveau_encoder *
index e4188f2..508b00a 100644 (file)
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
-       /* power on internal panel if it's not already.  the init tables of
-        * some vbios default this to off for some reason, causing the
-        * panel to not work after resume
-        */
-       if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
-               gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
-               msleep(300);
-       }
-
        /* enable polling for external displays */
        drm_kms_helper_poll_enable(dev);
 
index 180a45e..8b090f1 100644 (file)
@@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
        struct nouveau_cli *cli;
        int ret;
 
+       *pcli = NULL;
        ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
                                     nouveau_debug, size, pcli);
        cli = *pcli;
-       if (ret)
+       if (ret) {
+               if (cli)
+                       nouveau_client_destroy(&cli->base);
+               *pcli = NULL;
                return ret;
+       }
 
        mutex_init(&cli->mutex);
        return 0;
index bedafd1..cdb83ac 100644 (file)
@@ -60,6 +60,7 @@ u32  nv10_fence_read(struct nouveau_channel *);
 void nv10_fence_context_del(struct nouveau_channel *);
 void nv10_fence_destroy(struct nouveau_drm *);
 int  nv10_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
 
 int nv50_fence_create(struct nouveau_drm *);
 int nv84_fence_create(struct nouveau_drm *);
index 184cdf8..39ffc07 100644 (file)
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 
 static inline bool is_powersaving_dpms(int mode)
 {
-       return (mode != DRM_MODE_DPMS_ON);
+       return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
 }
 
 static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
index 7ae7f97..03017f2 100644 (file)
@@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
        kfree(priv);
 }
 
+void nv17_fence_resume(struct nouveau_drm *drm)
+{
+       struct nv10_fence_priv *priv = drm->fence;
+
+       nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
 int
 nv10_fence_create(struct nouveau_drm *drm)
 {
@@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
                if (ret == 0) {
                        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
                        priv->base.sync = nv17_fence_sync;
+                       priv->base.resume = nv17_fence_resume;
                }
        }
 
index c20f272..d889f3a 100644 (file)
@@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
        if (ret == 0) {
                nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
                priv->base.sync = nv17_fence_sync;
+               priv->base.resume = nv17_fence_resume;
        }
 
        if (ret)
index 061fa0a..4d0e60a 100644 (file)
@@ -2401,6 +2401,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct evergreen_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
index 896f1cb..59acabb 100644 (file)
@@ -1409,6 +1409,12 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct evergreen_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
index 537e259..3cb9d60 100644 (file)
@@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct rv515_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
index 03191a5..69ec24a 100644 (file)
@@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
        kfree(parser->relocs);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
-               kfree(parser->chunks[i].kpage[0]);
-               kfree(parser->chunks[i].kpage[1]);
+               if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+                       kfree(parser->chunks[i].kpage[0]);
+                       kfree(parser->chunks[i].kpage[1]);
+               }
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
@@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
        struct radeon_cs_chunk *relocs_chunk;
        unsigned idx;
 
+       *cs_reloc = NULL;
        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
-       *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        idx = p->dma_reloc_idx;
-       if (idx >= relocs_chunk->length_dw) {
+       if (idx >= p->nrelocs) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
-                         idx, relocs_chunk->length_dw);
+                         idx, p->nrelocs);
                return -EINVAL;
        }
        *cs_reloc = p->relocs_ptr[idx];
index 34e5230..a08f657 100644 (file)
@@ -324,7 +324,6 @@ struct radeon_bo {
        struct list_head                list;
        /* Protected by tbo.reserved */
        u32                             placements[3];
-       u32                             busy_placements[3];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
@@ -654,6 +653,8 @@ struct radeon_ring {
        u32                     ptr_reg_mask;
        u32                     nop;
        u32                     idx;
+       u64                     last_semaphore_signal_addr;
+       u64                     last_semaphore_wait_addr;
 };
 
 /*
index 396baba..469661f 100644 (file)
@@ -279,13 +279,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                                  p->chunks[p->chunk_ib_idx].length_dw);
                        return -EINVAL;
                }
-               if ((p->rdev->flags & RADEON_IS_AGP)) {
+               if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
                        p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
                            p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
-                               kfree(p->chunks[i].kpage[0]);
-                               kfree(p->chunks[i].kpage[1]);
+                               kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+                               kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
                                return -ENOMEM;
                        }
                }
@@ -583,7 +583,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        int i;
        int size = PAGE_SIZE;
-       bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+       bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+               false : true;
 
        for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
                if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
index dff6cf7..d9bf96e 100644 (file)
  *   2.26.0 - r600-eg: fix htile size computation
  *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
  *   2.28.0 - r600-eg: Add MEM_WRITE packet support
+ *   2.29.0 - R500 FP16 color clear registers
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       28
+#define KMS_DRIVER_MINOR       29
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index f5ba224..62cd512 100644 (file)
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
        enum drm_connector_status found = connector_status_disconnected;
        bool color = true;
 
+       /* just don't bother on RN50 those chip are often connected to remoting
+        * console hw and often we get failure to load detect those. So to make
+        * everyone happy report the encoder as always connected.
+        */
+       if (ASIC_IS_RN50(rdev)) {
+               return connector_status_connected;
+       }
+
        /* save the regs we need */
        vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
index 883c95d..d3aface 100644 (file)
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
+       rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
-
-       c = 0;
-       rbo->placement.busy_placement = rbo->busy_placements;
-       if (rbo->rdev->flags & RADEON_IS_AGP) {
-               rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
-       } else {
-               rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
-       }
        rbo->placement.num_busy_placement = c;
 }
 
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
+       u32 domain;
        int r;
 
        r = ttm_eu_reserve_buffers(head);
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
+                       domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+                       
+               retry:
+                       radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false);
                        if (unlikely(r)) {
+                               if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+                                       domain |= RADEON_GEM_DOMAIN_GTT;
+                                       goto retry;
+                               }
                                return r;
                        }
                }
index 141f2b6..2430d80 100644 (file)
@@ -784,6 +784,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
        }
        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
        seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+       seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+       seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        /* print 8 dw before current rptr as often it's the last executed
index 97f3ece..8dcc20f 100644 (file)
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
        /* we assume caller has already allocated space on waiters ring */
        radeon_semaphore_emit_wait(rdev, waiter, semaphore);
 
+       /* for debugging lockup only, used by sysfs debug files */
+       rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+       rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
        return 0;
 }
 
index 911a8fb..78d5e99 100644 (file)
@@ -324,6 +324,8 @@ rv515 0x6d40
 0x46AC US_OUT_FMT_2
 0x46B0 US_OUT_FMT_3
 0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
 0x4BC0 FG_FOG_BLEND
 0x4BC4 FG_FOG_FACTOR
 0x4BC8 FG_FOG_COLOR_R
index 3240a3d..ae8b482 100644 (file)
@@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct evergreen_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
index 33d20be..52b20b1 100644 (file)
@@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                        bo->mem = tmp_mem;
                        bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
+                       *mem = tmp_mem;
                }
 
                goto out_err;
index d73d6e3..44420fc 100644 (file)
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
-               if (ret)
+               if (ret) {
+                       /* if we fail here don't nuke the mm node
+                        * as the bo still owns it */
+                       old_copy.mm_node = NULL;
                        goto out1;
+               }
        }
 
        add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-               if (ret)
+               if (ret) {
+                       /* failing here, means keep old copy as-is */
+                       old_copy.mm_node = NULL;
                        goto out1;
+               }
        }
        mb();
 out2:
index 512f44a..fe5cdbc 100644 (file)
 static u8 *udl_get_edid(struct udl_device *udl)
 {
        u8 *block;
-       char rbuf[3];
+       char *rbuf;
        int ret, i;
 
        block = kmalloc(EDID_LENGTH, GFP_KERNEL);
        if (block == NULL)
                return NULL;
 
+       rbuf = kmalloc(2, GFP_KERNEL);
+       if (rbuf == NULL)
+               goto error;
+
        for (i = 0; i < EDID_LENGTH; i++) {
                ret = usb_control_msg(udl->ddev->usbdev,
                                      usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
                                      HZ);
                if (ret < 1) {
                        DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
-                       i--;
                        goto error;
                }
                block[i] = rbuf[1];
        }
 
+       kfree(rbuf);
        return block;
 
 error:
        kfree(block);
+       kfree(rbuf);
        return NULL;
 }
 
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
 
        edid = (struct edid *)udl_get_edid(udl);
 
+       /*
+        * We only read the main block, but if the monitor reports extension
+        * blocks then the drm edid code expects them to be present, so patch
+        * the extension count to 0.
+        */
+       edid->checksum += edid->extensions;
+       edid->extensions = 0;
+
        drm_mode_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
index 3d62781..aa3fec0 100644 (file)
@@ -568,8 +568,7 @@ static int mousevsc_remove(struct hv_device *dev)
 
 static const struct hv_vmbus_device_id id_table[] = {
        /* Mouse guid */
-       { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
-                      0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+       { HV_MOUSE_GUID, },
        { },
 };
 
index 773a2f2..0b122f8 100644 (file)
@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
                                        [channel->monitor_grp].pending);
 
        } else {
-               vmbus_set_event(channel->offermsg.child_relid);
+               vmbus_set_event(channel);
        }
 }
 
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                  PAGE_SHIFT;
-       open_msg->server_contextarea_gpadlhandle = 0;
+       open_msg->target_vp = newchannel->target_vp;
 
        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
        struct scatterlist bufferlist[3];
        u64 aligned_data = 0;
        int ret;
+       bool signal = false;
 
 
        /* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
        sg_set_buf(&bufferlist[2], &aligned_data,
                   packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-       if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+       if (ret == 0 && signal)
                vmbus_setevent(channel);
 
        return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct scatterlist bufferlist[3];
        u64 aligned_data = 0;
+       bool signal = false;
 
        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
        sg_set_buf(&bufferlist[2], &aligned_data,
                packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-       if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+       if (ret == 0 && signal)
                vmbus_setevent(channel);
 
        return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct scatterlist bufferlist[3];
        u64 aligned_data = 0;
+       bool signal = false;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);
 
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        sg_set_buf(&bufferlist[2], &aligned_data,
                packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-       if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+       if (ret == 0 && signal)
                vmbus_setevent(channel);
 
        return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
        u32 packetlen;
        u32 userlen;
        int ret;
+       bool signal = false;
 
        *buffer_actual_len = 0;
        *requestid = 0;
@@ -758,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
 
        /* Copy over the packet to the user buffer */
        ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
-                            (desc.offset8 << 3));
+                            (desc.offset8 << 3), &signal);
 
+       if (signal)
+               vmbus_setevent(channel);
 
        return 0;
 }
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 {
        struct vmpacket_descriptor desc;
        u32 packetlen;
-       u32 userlen;
        int ret;
+       bool signal = false;
 
        *buffer_actual_len = 0;
        *requestid = 0;
@@ -788,7 +794,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 
 
        packetlen = desc.len8 << 3;
-       userlen = packetlen - (desc.offset8 << 3);
 
        *buffer_actual_len = packetlen;
 
@@ -802,7 +807,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
        *requestid = desc.trans_id;
 
        /* Copy over the entire packet to the user buffer */
-       ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+       ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+                                &signal);
+
+       if (signal)
+               vmbus_setevent(channel);
 
        return 0;
 }
index 2f84c5c..53a8600 100644 (file)
@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
        }
 }
 
+enum {
+       IDE = 0,
+       SCSI,
+       NIC,
+       MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+       /* IDE */
+       { HV_IDE_GUID, },
+       /* Storage - SCSI */
+       { HV_SCSI_GUID, },
+       /* Network */
+       { HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32  next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to VCPU. We
+ * implement here a simple round robin scheme for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static u32 get_vp_index(uuid_le *type_guid)
+{
+       u32 cur_cpu;
+       int i;
+       bool perf_chn = false;
+       u32 max_cpus = num_online_cpus();
+
+       for (i = IDE; i < MAX_PERF_CHN; i++) {
+               if (!memcmp(type_guid->b, hp_devs[i].guid,
+                                sizeof(uuid_le))) {
+                       perf_chn = true;
+                       break;
+               }
+       }
+       if ((vmbus_proto_version == VERSION_WS2008) ||
+           (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+               /*
+                * Prior to win8, all channel interrupts are
+                * delivered on cpu 0.
+                * Also if the channel is not a performance critical
+                * channel, bind it to cpu 0.
+                */
+               return 0;
+       }
+       cur_cpu = (++next_vp % max_cpus);
+       return 0;
+}
+
 /*
  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
  *
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
                return;
        }
 
+       /*
+        * By default we setup state to enable batched
+        * reading. A specific service can choose to
+        * disable this prior to opening the channel.
+        */
+       newchannel->batched_reading = true;
+
+       /*
+        * Setup state for signalling the host.
+        */
+       newchannel->sig_event = (struct hv_input_signal_event *)
+                               (ALIGN((unsigned long)
+                               &newchannel->sig_buf,
+                               HV_HYPERCALL_PARAM_ALIGN));
+
+       newchannel->sig_event->connectionid.asu32 = 0;
+       newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+       newchannel->sig_event->flag_number = 0;
+       newchannel->sig_event->rsvdz = 0;
+
+       if (vmbus_proto_version != VERSION_WS2008) {
+               newchannel->is_dedicated_interrupt =
+                               (offer->is_dedicated_interrupt != 0);
+               newchannel->sig_event->connectionid.u.id =
+                               offer->connection_id;
+       }
+
+       newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+
        memcpy(&newchannel->offermsg, offer,
               sizeof(struct vmbus_channel_offer_channel));
        newchannel->monitor_grp = (u8)offer->monitorid / 32;
index 650c9f0..253a74b 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
+#include <linux/export.h>
 #include <asm/hyperv.h>
 #include "hyperv_vmbus.h"
 
@@ -40,15 +41,99 @@ struct vmbus_connection vmbus_connection = {
 };
 
 /*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+       switch (current_version) {
+       case (VERSION_WIN7):
+               return VERSION_WS2008;
+
+       case (VERSION_WIN8):
+               return VERSION_WIN7;
+
+       case (VERSION_WS2008):
+       default:
+               return VERSION_INVAL;
+       }
+}
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+                                       __u32 version)
+{
+       int ret = 0;
+       struct vmbus_channel_initiate_contact *msg;
+       unsigned long flags;
+       int t;
+
+       init_completion(&msginfo->waitevent);
+
+       msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+       msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+       msg->vmbus_version_requested = version;
+       msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+       msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
+       msg->monitor_page2 = virt_to_phys(
+                       (void *)((unsigned long)vmbus_connection.monitor_pages +
+                                PAGE_SIZE));
+
+       /*
+        * Add to list before we send the request since we may
+        * receive the response before returning from this routine
+        */
+       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+       list_add_tail(&msginfo->msglistentry,
+                     &vmbus_connection.chn_msg_list);
+
+       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+       ret = vmbus_post_msg(msg,
+                              sizeof(struct vmbus_channel_initiate_contact));
+       if (ret != 0) {
+               spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+               list_del(&msginfo->msglistentry);
+               spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+                                       flags);
+               return ret;
+       }
+
+       /* Wait for the connection response */
+       t =  wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+       if (t == 0) {
+               spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+                               flags);
+               list_del(&msginfo->msglistentry);
+               spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+                                       flags);
+               return -ETIMEDOUT;
+       }
+
+       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+       list_del(&msginfo->msglistentry);
+       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+       /* Check if successful */
+       if (msginfo->response.version_response.version_supported) {
+               vmbus_connection.conn_state = CONNECTED;
+       } else {
+               return -ECONNREFUSED;
+       }
+
+       return ret;
+}
+
+/*
  * vmbus_connect - Sends a connect request on the partition service connection
  */
 int vmbus_connect(void)
 {
        int ret = 0;
-       int t;
        struct vmbus_channel_msginfo *msginfo = NULL;
-       struct vmbus_channel_initiate_contact *msg;
-       unsigned long flags;
+       __u32 version;
 
        /* Initialize the vmbus connection */
        vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
                goto cleanup;
        }
 
-       init_completion(&msginfo->waitevent);
-
-       msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
-       msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
-       msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
-       msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
-       msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
-       msg->monitor_page2 = virt_to_phys(
-                       (void *)((unsigned long)vmbus_connection.monitor_pages +
-                                PAGE_SIZE));
-
        /*
-        * Add to list before we send the request since we may
-        * receive the response before returning from this routine
+        * Negotiate a compatible VMBUS version number with the
+        * host. We start with the highest number we can support
+        * and work our way down until we negotiate a compatible
+        * version.
         */
-       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-       list_add_tail(&msginfo->msglistentry,
-                     &vmbus_connection.chn_msg_list);
 
-       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+       version = VERSION_CURRENT;
 
-       ret = vmbus_post_msg(msg,
-                              sizeof(struct vmbus_channel_initiate_contact));
-       if (ret != 0) {
-               spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-               list_del(&msginfo->msglistentry);
-               spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-                                       flags);
-               goto cleanup;
-       }
+       do {
+               ret = vmbus_negotiate_version(msginfo, version);
+               if (ret == 0)
+                       break;
 
-       /* Wait for the connection response */
-       t =  wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-       if (t == 0) {
-               spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
-                               flags);
-               list_del(&msginfo->msglistentry);
-               spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-                                       flags);
-               ret = -ETIMEDOUT;
-               goto cleanup;
-       }
+               version = vmbus_get_next_version(version);
+       } while (version != VERSION_INVAL);
 
-       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-       list_del(&msginfo->msglistentry);
-       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-       /* Check if successful */
-       if (msginfo->response.version_response.version_supported) {
-               vmbus_connection.conn_state = CONNECTED;
-       } else {
-               pr_err("Unable to connect, "
-                       "Version %d not supported by Hyper-V\n",
-                       VMBUS_REVISION_NUMBER);
-               ret = -ECONNREFUSED;
+       if (version == VERSION_INVAL)
                goto cleanup;
-       }
+
+       vmbus_proto_version = version;
+       pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+                   host_info_eax, host_info_ebx >> 16,
+                   host_info_ebx & 0xFFFF, host_info_ecx,
+                   host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+                   version >> 16, version & 0xFFFF);
 
        kfree(msginfo);
        return 0;
 
 cleanup:
+       pr_err("Unable to connect to host\n");
        vmbus_connection.conn_state = DISCONNECTED;
 
        if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
 {
        struct vmbus_channel *channel;
        unsigned long flags;
+       void *arg;
+       bool read_state;
+       u32 bytes_to_read;
 
        /*
         * Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ static void process_chn_event(u32 relid)
         */
 
        spin_lock_irqsave(&channel->inbound_lock, flags);
-       if (channel->onchannel_callback != NULL)
-               channel->onchannel_callback(channel->channel_callback_context);
-       else
+       if (channel->onchannel_callback != NULL) {
+               arg = channel->channel_callback_context;
+               read_state = channel->batched_reading;
+               /*
+                * This callback reads the messages sent by the host.
+                * We can optimize host to guest signaling by ensuring:
+                * 1. While reading the channel, we disable interrupts from
+                *    host.
+                * 2. Ensure that we process all posted messages from the host
+                *    before returning from this callback.
+                * 3. Once we return, enable signaling from the host. Once this
+                *    state is set we check to see if additional packets are
+                *    available to read. In this case we repeat the process.
+                */
+
+               do {
+                       hv_begin_read(&channel->inbound);
+                       channel->onchannel_callback(arg);
+                       bytes_to_read = hv_end_read(&channel->inbound);
+               } while (read_state && (bytes_to_read != 0));
+       } else {
                pr_err("no channel callback for relid - %u\n", relid);
+       }
 
        spin_unlock_irqrestore(&channel->inbound_lock, flags);
 }
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
 void vmbus_on_event(unsigned long data)
 {
        u32 dword;
-       u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+       u32 maxdword;
        int bit;
        u32 relid;
-       u32 *recv_int_page = vmbus_connection.recv_int_page;
+       u32 *recv_int_page = NULL;
+       void *page_addr;
+       int cpu = smp_processor_id();
+       union hv_synic_event_flags *event;
+
+       if ((vmbus_proto_version == VERSION_WS2008) ||
+               (vmbus_proto_version == VERSION_WIN7)) {
+               maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+               recv_int_page = vmbus_connection.recv_int_page;
+       } else {
+               /*
+                * When the host is win8 and beyond, the event page
+                * can be directly checked to get the id of the channel
+                * that has the interrupt pending.
+                */
+               maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+               page_addr = hv_context.synic_event_page[cpu];
+               event = (union hv_synic_event_flags *)page_addr +
+                                                VMBUS_MESSAGE_SINT;
+               recv_int_page = event->flags32;
+       }
+
+
 
        /* Check events */
        if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 /*
  * vmbus_set_event - Send an event notification to the parent
  */
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
 {
-       /* Each u32 represents 32 channels */
-       sync_set_bit(child_relid & 31,
-               (unsigned long *)vmbus_connection.send_int_page +
-               (child_relid >> 5));
+       u32 child_relid = channel->offermsg.child_relid;
+
+       if (!channel->is_dedicated_interrupt) {
+               /* Each u32 represents 32 channels */
+               sync_set_bit(child_relid & 31,
+                       (unsigned long *)vmbus_connection.send_int_page +
+                       (child_relid >> 5));
+       }
 
-       return hv_signal_event();
+       return hv_signal_event(channel->sig_event);
 }
index 3648f8f..1c5481d 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
 #include <linux/version.h>
+#include <linux/interrupt.h>
 #include <asm/hyperv.h>
 #include "hyperv_vmbus.h"
 
 struct hv_context hv_context = {
        .synic_initialized      = false,
        .hypercall_page         = NULL,
-       .signal_event_param     = NULL,
-       .signal_event_buffer    = NULL,
 };
 
 /*
  * query_hypervisor_info - Get version info of the windows hypervisor
  */
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
 static int query_hypervisor_info(void)
 {
        unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
                edx = 0;
                op = HVCPUID_VERSION;
                cpuid(op, &eax, &ebx, &ecx, &edx);
-               pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
-                           eax,
-                           ebx >> 16,
-                           ebx & 0xFFFF,
-                           ecx,
-                           edx >> 24,
-                           edx & 0xFFFFFF);
+               host_info_eax = eax;
+               host_info_ebx = ebx;
+               host_info_ecx = ecx;
+               host_info_edx = edx;
        }
        return max_leaf;
 }
@@ -137,6 +138,10 @@ int hv_init(void)
        memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
        memset(hv_context.synic_message_page, 0,
               sizeof(void *) * NR_CPUS);
+       memset(hv_context.vp_index, 0,
+              sizeof(int) * NR_CPUS);
+       memset(hv_context.event_dpc, 0,
+              sizeof(void *) * NR_CPUS);
 
        max_leaf = query_hypervisor_info();
 
@@ -168,24 +173,6 @@ int hv_init(void)
 
        hv_context.hypercall_page = virtaddr;
 
-       /* Setup the global signal event param for the signal event hypercall */
-       hv_context.signal_event_buffer =
-                       kmalloc(sizeof(struct hv_input_signal_event_buffer),
-                               GFP_KERNEL);
-       if (!hv_context.signal_event_buffer)
-               goto cleanup;
-
-       hv_context.signal_event_param =
-               (struct hv_input_signal_event *)
-                       (ALIGN((unsigned long)
-                                 hv_context.signal_event_buffer,
-                                 HV_HYPERCALL_PARAM_ALIGN));
-       hv_context.signal_event_param->connectionid.asu32 = 0;
-       hv_context.signal_event_param->connectionid.u.id =
-                                               VMBUS_EVENT_CONNECTION_ID;
-       hv_context.signal_event_param->flag_number = 0;
-       hv_context.signal_event_param->rsvdz = 0;
-
        return 0;
 
 cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
        /* Reset our OS id */
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
 
-       kfree(hv_context.signal_event_buffer);
-       hv_context.signal_event_buffer = NULL;
-       hv_context.signal_event_param = NULL;
-
        if (hv_context.hypercall_page) {
                hypercall_msr.as_uint64 = 0;
                wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@ int hv_post_message(union hv_connection_id connection_id,
  *
  * This involves a hypercall.
  */
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
 {
        u16 status;
 
-       status = do_hypercall(HVCALL_SIGNAL_EVENT,
-                              hv_context.signal_event_param,
-                              NULL) & 0xFFFF;
+       status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
        return status;
 }
 
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;
+       u64 vp_index;
 
        u32 irq_vector = *((u32 *)(irqarg));
        int cpu = smp_processor_id();
@@ -307,6 +290,15 @@ void hv_synic_init(void *irqarg)
        /* Check the version */
        rdmsrl(HV_X64_MSR_SVERSION, version);
 
+       hv_context.event_dpc[cpu] = (struct tasklet_struct *)
+                                       kmalloc(sizeof(struct tasklet_struct),
+                                               GFP_ATOMIC);
+       if (hv_context.event_dpc[cpu] == NULL) {
+               pr_err("Unable to allocate event dpc\n");
+               goto cleanup;
+       }
+       tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
        hv_context.synic_message_page[cpu] =
                (void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -345,7 +337,7 @@ void hv_synic_init(void *irqarg)
        shared_sint.as_uint64 = 0;
        shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
        shared_sint.masked = false;
-       shared_sint.auto_eoi = false;
+       shared_sint.auto_eoi = true;
 
        wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
@@ -356,6 +348,14 @@ void hv_synic_init(void *irqarg)
        wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 
        hv_context.synic_initialized = true;
+
+       /*
+        * Setup the mapping between Hyper-V's notion
+        * of cpuid and Linux' notion of cpuid.
+        * This array will be indexed using Linux cpuid.
+        */
+       rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+       hv_context.vp_index[cpu] = (u32)vp_index;
        return;
 
 cleanup:
index f6c0011..3787321 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
-#include <linux/mman.h>
 #include <linux/percpu_counter.h>
 
 #include <linux/hyperv.h>
@@ -403,7 +402,7 @@ struct dm_info_header {
  */
 
 struct dm_info_msg {
-       struct dm_info_header header;
+       struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
@@ -415,10 +414,17 @@ struct dm_info_msg {
 
 static bool hot_add;
 static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 30;
 
 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
 static atomic_t trans_id = ATOMIC_INIT(0);
 
 static int dm_ring_size = (5 * PAGE_SIZE);
@@ -503,16 +509,48 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
 
 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 {
-       switch (msg->header.type) {
+       struct dm_info_header *info_hdr;
+
+       info_hdr = (struct dm_info_header *)msg->info;
+
+       switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
-               pr_info("Data Size is %d\n", msg->header.data_size);
+               pr_info("Data Size is %d\n", info_hdr->data_size);
                break;
        default:
-               pr_info("Received Unknown type: %d\n", msg->header.type);
+               pr_info("Received Unknown type: %d\n", info_hdr->type);
        }
 }
 
+unsigned long compute_balloon_floor(void)
+{
+       unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+       /* Simple continuous piecewiese linear function:
+        *  max MiB -> min MiB  gradient
+        *       0         0
+        *      16        16
+        *      32        24
+        *     128        72    (1/2)
+        *     512       168    (1/4)
+        *    2048       360    (1/8)
+        *    8192       552    (1/32)
+        *   32768      1320
+        *  131072      4392
+        */
+       if (totalram_pages < MB2PAGES(128))
+               min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+       else if (totalram_pages < MB2PAGES(512))
+               min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+       else if (totalram_pages < MB2PAGES(2048))
+               min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+       else
+               min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+       return min_pages;
+}
+
 /*
  * Post our status as it relates memory pressure to the
  * host. Host expects the guests to post this status
@@ -526,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 static void post_status(struct hv_dynmem_device *dm)
 {
        struct dm_status status;
+       struct sysinfo val;
 
-
+       if (pressure_report_delay > 0) {
+               --pressure_report_delay;
+               return;
+       }
+       si_meminfo(&val);
        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);
 
-
-       status.num_committed = vm_memory_committed();
+       /*
+        * The host expects the guest to report free memory.
+        * Further, the host expects the pressure information to
+        * include the ballooned out pages.
+        * For a given amount of memory that we are managing, we
+        * need to compute a floor below which we should not balloon.
+        * Compute this and add it to the pressure report.
+        */
+       status.num_avail = val.freeram;
+       status.num_committed = vm_memory_committed() +
+                               dm->num_pages_ballooned +
+                               compute_balloon_floor();
 
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
@@ -543,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
 
 }
 
-
-
 static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
 {
@@ -879,7 +930,7 @@ static int balloon_probe(struct hv_device *dev,
                        balloon_onchannelcallback, dev);
 
        if (ret)
-               return ret;
+               goto probe_error0;
 
        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
@@ -891,7 +942,7 @@ static int balloon_probe(struct hv_device *dev,
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
-               goto probe_error0;
+               goto probe_error1;
        }
 
        hv_set_drvdata(dev, &dm_device);
@@ -914,12 +965,12 @@ static int balloon_probe(struct hv_device *dev,
                                VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
-               goto probe_error1;
+               goto probe_error2;
 
        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
 
        /*
@@ -928,7 +979,7 @@ static int balloon_probe(struct hv_device *dev,
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
        /*
         * Now submit our capabilities to the host.
@@ -961,12 +1012,12 @@ static int balloon_probe(struct hv_device *dev,
                                VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
-               goto probe_error1;
+               goto probe_error2;
 
        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
 
        /*
@@ -975,18 +1026,20 @@ static int balloon_probe(struct hv_device *dev,
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
 
        dm_device.state = DM_INITIALIZED;
 
        return 0;
 
-probe_error1:
+probe_error2:
        kthread_stop(dm_device.thread);
 
-probe_error0:
+probe_error1:
        vmbus_close(dev->channel);
+probe_error0:
+       kfree(send_buffer);
        return ret;
 }
 
@@ -999,6 +1052,7 @@ static int balloon_remove(struct hv_device *dev)
 
        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
+       kfree(send_buffer);
 
        return 0;
 }
@@ -1006,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
 static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
-       { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
-                      0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
-       },
+       { HV_DM_GUID, },
        { },
 };
 
index a0667de..1d4cbd8 100644 (file)
@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
        .util_deinit = hv_kvp_deinit,
 };
 
+static void perform_shutdown(struct work_struct *dummy)
+{
+       orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
 static void shutdown_onchannelcallback(void *context)
 {
        struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
        }
 
        if (execute_shutdown == true)
-               orderly_poweroff(true);
+               schedule_work(&shutdown_work);
 }
 
 /*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
                }
        }
 
+       /*
+        * The set of services managed by the util driver are not performance
+        * critical and do not need batched reading. Furthermore, some services
+        * such as KVP can only handle one message from the host at a time.
+        * Turn off batched reading for all util drivers before we open the
+        * channel.
+        */
+
+       set_channel_read_state(dev->channel, false);
+
        ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
                        srv->util_cb, dev->channel);
        if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
 
 static const struct hv_vmbus_device_id id_table[] = {
        /* Shutdown guid */
-       { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
-                      0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
-         .driver_data = (unsigned long)&util_shutdown },
+       { HV_SHUTDOWN_GUID,
+         .driver_data = (unsigned long)&util_shutdown
+       },
        /* Time synch guid */
-       { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
-                      0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
-         .driver_data = (unsigned long)&util_timesynch },
+       { HV_TS_GUID,
+         .driver_data = (unsigned long)&util_timesynch
+       },
        /* Heartbeat guid */
-       { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
-                      0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
-         .driver_data = (unsigned long)&util_heartbeat },
+       { HV_HEART_BEAT_GUID,
+         .driver_data = (unsigned long)&util_heartbeat
+       },
        /* KVP guid */
-       { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
-                      0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3,  0xe6)
-         .driver_data = (unsigned long)&util_kvp },
+       { HV_KVP_GUID,
+         .driver_data = (unsigned long)&util_kvp
+       },
        { },
 };
 
index d8d1fad..12f2f9e 100644 (file)
@@ -101,15 +101,6 @@ enum hv_message_type {
 /* Define invalid partition identifier. */
 #define HV_PARTITION_ID_INVALID                ((u64)0x0)
 
-/* Define connection identifier type. */
-union hv_connection_id {
-       u32 asu32;
-       struct {
-               u32 id:24;
-               u32 reserved:8;
-       } u;
-};
-
 /* Define port identifier type. */
 union hv_port_id {
        u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
        u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 };
 
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
-       union hv_connection_id connectionid;
-       u16 flag_number;
-       u16 rsvdz;
-};
-
 /*
  * Versioning definitions used for guests reporting themselves to the
  * hypervisor, and visa versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
 
 
 
-struct hv_input_signal_event_buffer {
-       u64 align8;
-       struct hv_input_signal_event event;
-};
-
 struct hv_context {
        /* We only support running on top of Hyper-V
        * So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@ struct hv_context {
 
        bool synic_initialized;
 
-       /*
-        * This is used as an input param to HvCallSignalEvent hypercall. The
-        * input param is immutable in our usage and must be dynamic mem (vs
-        * stack or global). */
-       struct hv_input_signal_event_buffer *signal_event_buffer;
-       /* 8-bytes aligned of the buffer above */
-       struct hv_input_signal_event *signal_event_param;
-
        void *synic_message_page[NR_CPUS];
        void *synic_event_page[NR_CPUS];
+       /*
+        * Hypervisor's notion of virtual processor ID is different from
+        * Linux' notion of CPU ID. This information can only be retrieved
+        * in the context of the calling CPU. Setup a map for easy access
+        * to this information:
+        *
+        * vp_index[a] is the Hyper-V's processor ID corresponding to
+        * Linux cpuid 'a'.
+        */
+       u32 vp_index[NR_CPUS];
+       /*
+        * Starting with win8, we can take channel interrupts on any CPU;
+        * we will manage the tasklet that handles events on a per CPU
+        * basis.
+        */
+       struct tasklet_struct *event_dpc[NR_CPUS];
 };
 
 extern struct hv_context hv_context;
@@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
                         enum hv_message_type message_type,
                         void *payload, size_t payload_size);
 
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
 
 extern void hv_synic_init(void *irqarg);
 
 extern void hv_synic_cleanup(void *arg);
 
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
 
 /* Interface */
 
@@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
                    struct scatterlist *sglist,
-                   u32 sgcount);
+                   u32 sgcount, bool *signal);
 
 int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
                   u32 buflen);
@@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
 int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
                   void *buffer,
                   u32 buflen,
-                  u32 offset);
+                  u32 offset, bool *signal);
 
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
 
 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                            struct hv_ring_buffer_debug_info *debug_info);
 
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
 /*
  * Maximum channels is determined by the size of the interrupt page
  * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@ int vmbus_connect(void);
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
 
index 7233c88..cafa72f 100644 (file)
 
 #include "hyperv_vmbus.h"
 
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+       rbi->ring_buffer->interrupt_mask = 1;
+       smp_mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+       u32 read;
+       u32 write;
+
+       rbi->ring_buffer->interrupt_mask = 0;
+       smp_mb();
+
+       /*
+        * Now check to see if the ring buffer is still empty.
+        * If it is not, we raced and we need to process new
+        * incoming messages.
+        */
+       hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+       return read;
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here is the details of this protocol:
+ *
+ *     1. The host guarantees that while it is draining the
+ *        ring buffer, it will set the interrupt_mask to
+ *        indicate it does not need to be interrupted when
+ *        new data is placed.
+ *
+ *     2. The host guarantees that it will completely drain
+ *        the ring buffer before exiting the read loop. Further,
+ *        once the ring buffer is empty, it will clear the
+ *        interrupt_mask and re-check to see if new data has
+ *        arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+       if (rbi->ring_buffer->interrupt_mask)
+               return false;
+
+       /*
+        * This is the only case we need to signal when the
+        * ring transitions from being empty to non-empty.
+        */
+       if (old_write == rbi->ring_buffer->read_index)
+               return true;
+
+       return false;
+}
+
+/*
+ * To optimize the flow management on the send-side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, potential the
+ * consumer of the ring buffer can signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ *    producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz set to indicate if
+ *    the consumer of the ring will signal when the ring
+ *    state transitions from being full to a state where
+ *    there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+                                        struct hv_ring_buffer_info *rbi)
+{
+       u32 prev_write_sz;
+       u32 cur_write_sz;
+       u32 r_size;
+       u32 write_loc = rbi->ring_buffer->write_index;
+       u32 read_loc = rbi->ring_buffer->read_index;
+       u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+       /*
+        * If the other end is not blocked on write don't bother.
+        */
+       if (pending_sz == 0)
+               return false;
+
+       r_size = rbi->ring_datasize;
+       cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+                       read_loc - write_loc;
+
+       prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+                       old_rd - write_loc;
+
+
+       if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+               return true;
+
+       return false;
+}
 
 /*
  * hv_get_next_write_location()
@@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
        }
 }
 
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
-       return rbi->ring_buffer->interrupt_mask;
-}
-
 /*
  *
  * hv_ringbuffer_init()
@@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-                   struct scatterlist *sglist, u32 sgcount)
+                   struct scatterlist *sglist, u32 sgcount, bool *signal)
 {
        int i = 0;
        u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
        struct scatterlist *sg;
        u32 next_write_location;
+       u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;
 
@@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);
 
+       old_write = next_write_location;
+
        for_each_sg(sglist, sg, sgcount, i)
        {
                next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                                             &prev_indices,
                                             sizeof(u64));
 
-       /* Make sure we flush all writes before updating the writeIndex */
-       smp_wmb();
+       /* Issue a full memory barrier before updating the write index */
+       smp_mb();
 
        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);
 
 
        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+       *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
 }
 
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
  *
  */
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
-                  u32 buflen, u32 offset)
+                  u32 buflen, u32 offset, bool *signal)
 {
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
+       u32 old_read;
 
        if (buflen <= 0)
                return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);
 
+       old_read = bytes_avail_toread;
+
        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 
        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
+       *signal = hv_need_to_signal_on_read(old_read, inring_info);
+
        return 0;
 }
index 8e1a9ec..cf19dfa 100644 (file)
@@ -33,6 +33,7 @@
 #include <acpi/acpi_bus.h>
 #include <linux/completion.h>
 #include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
 #include <asm/hyperv.h>
 #include <asm/hypervisor.h>
 #include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
 static struct acpi_device  *hv_acpi_dev;
 
 static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
 static struct completion probe_event;
 static int irq;
 
@@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
        union hv_synic_event_flags *event;
        bool handled = false;
 
+       page_addr = hv_context.synic_event_page[cpu];
+       if (page_addr == NULL)
+               return IRQ_NONE;
+
+       event = (union hv_synic_event_flags *)page_addr +
+                                        VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */
 
-       page_addr = hv_context.synic_event_page[cpu];
-       event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+       if ((vmbus_proto_version == VERSION_WS2008) ||
+               (vmbus_proto_version == VERSION_WIN7)) {
 
-       /* Since we are a child, we only need to check bit 0 */
-       if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+               /* Since we are a child, we only need to check bit 0 */
+               if (sync_test_and_clear_bit(0,
+                       (unsigned long *) &event->flags32[0])) {
+                       handled = true;
+               }
+       } else {
+               /*
+                * Our host is win8 or above. The signaling mechanism
+                * has changed and we can directly look at the event page.
+                * If bit n is set then we have an interrup on the channel
+                * whose id is n.
+                */
                handled = true;
-               tasklet_schedule(&event_dpc);
        }
 
+       if (handled)
+               tasklet_schedule(hv_context.event_dpc[cpu]);
+
+
        page_addr = hv_context.synic_message_page[cpu];
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 
@@ -485,6 +504,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
 }
 
 /*
+ * vmbus interrupt flow handler:
+ * vmbus interrupts can concurrently occur on multiple CPUs and
+ * can be handled concurrently.
+ */
+
+static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+{
+       kstat_incr_irqs_this_cpu(irq, desc);
+
+       desc->action->handler(irq, desc->action->dev_id);
+}
+
+/*
  * vmbus_bus_init -Main vmbus driver initialization routine.
  *
  * Here, we
@@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
        }
 
        tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-       tasklet_init(&event_dpc, vmbus_on_event, 0);
 
        ret = bus_register(&hv_bus);
        if (ret)
@@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
                goto err_unregister;
        }
 
+       /*
+        * Vmbus interrupts can be handled concurrently on
+        * different CPUs. Establish an appropriate interrupt flow
+        * handler that can support this model.
+        */
+       irq_set_handler(irq, vmbus_flow_handler);
+
        vector = IRQ0_VECTOR + irq;
 
        /*
@@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
 
        ret = driver_register(&hv_driver->driver);
 
-       vmbus_request_offers();
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(__vmbus_driver_register);
index 86d7f6d..d867e6b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/vexpress.h>
index cbba7db..f5258c2 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include "i2c-designware-core.h"
 
 /*
@@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
        return dw_readl(dev, DW_IC_COMP_PARAM_1);
 }
 EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
+MODULE_LICENSE("GPL");
index 1b1a936..d6abaf2 100644 (file)
@@ -127,7 +127,7 @@ struct mxs_i2c_dev {
        struct device *dev;
        void __iomem *regs;
        struct completion cmd_complete;
-       u32 cmd_err;
+       int cmd_err;
        struct i2c_adapter adapter;
        const struct mxs_i2c_speed_config *speed;
 
@@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
        if (msg->len == 0)
                return -EINVAL;
 
-       init_completion(&i2c->cmd_complete);
+       INIT_COMPLETION(i2c->cmd_complete);
        i2c->cmd_err = 0;
 
        ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -473,6 +473,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
        i2c->dev = dev;
        i2c->speed = &mxs_i2c_95kHz_config;
 
+       init_completion(&i2c->cmd_complete);
+
        if (dev->of_node) {
                err = mxs_i2c_get_ofdata(i2c);
                if (err)
index 20d41bf..4cc2f05 100644 (file)
@@ -803,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
                        if (stat & OMAP_I2C_STAT_AL) {
                                dev_err(dev->dev, "Arbitration lost\n");
                                dev->cmd_err |= OMAP_I2C_STAT_AL;
-                               omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+                               omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
                        }
 
                        return -EIO;
@@ -963,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                                i2c_omap_errata_i207(dev, stat);
 
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
-                       break;
+                       continue;
                }
 
                if (stat & OMAP_I2C_STAT_RRDY) {
@@ -989,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                                break;
 
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
-                       break;
+                       continue;
                }
 
                if (stat & OMAP_I2C_STAT_XRDY) {
index 3f1818b..e03381a 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/of_i2c.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -328,6 +329,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
        adap->algo = &i2c_sirfsoc_algo;
        adap->algo_data = siic;
 
+       adap->dev.of_node = pdev->dev.of_node;
        adap->dev.parent = &pdev->dev;
        adap->nr = pdev->id;
 
@@ -371,6 +373,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
 
        clk_disable(clk);
 
+       of_i2c_register_devices(adap);
+
        dev_info(&pdev->dev, " I2C adapter ready to operate\n");
 
        return 0;
index 1e44d04..a43c0ce 100644 (file)
@@ -167,7 +167,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
        }
 
        mux->busses = devm_kzalloc(&pdev->dev,
-                                  sizeof(mux->busses) * mux->pdata->bus_count,
+                                  sizeof(*mux->busses) * mux->pdata->bus_count,
                                   GFP_KERNEL);
        if (!mux->busses) {
                dev_err(&pdev->dev, "Cannot allocate busses\n");
index 4ba384f..2df9414 100644 (file)
@@ -448,8 +448,6 @@ static int intel_idle_probe(void)
        else
                on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
 
-       register_cpu_notifier(&cpu_hotplug_notifier);
-
        pr_debug(PREFIX "v" INTEL_IDLE_VERSION
                " model 0x%X\n", boot_cpu_data.x86_model);
 
@@ -612,6 +610,7 @@ static int __init intel_idle_init(void)
                        return retval;
                }
        }
+       register_cpu_notifier(&cpu_hotplug_notifier);
 
        return 0;
 }
index fe4bcd7..05e996f 100644 (file)
@@ -8,6 +8,7 @@ config HID_SENSOR_ACCEL_3D
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID Accelerometers 3D"
        help
          Say yes here to build support for the HID SENSOR
index 4a5f639..bbad9b9 100644 (file)
@@ -411,7 +411,11 @@ static int ad7266_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               st->vref_uv = regulator_get_voltage(st->reg);
+               ret = regulator_get_voltage(st->reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               st->vref_uv = ret;
        } else {
                /* Use internal reference */
                st->vref_uv = 2500000;
index 04b0135..a526c0e 100644 (file)
@@ -80,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
                *timestamp = pf->timestamp;
        }
 
-       iio_push_to_buffers(indio_dev, (u8 *)st->buffer);
+       iio_push_to_buffers(idev, (u8 *)st->buffer);
 
        iio_trigger_notify_done(idev->trig);
 
index b5669be..03b25b3 100644 (file)
@@ -1605,19 +1605,20 @@ static int max1363_probe(struct i2c_client *client,
 
        return 0;
 error_free_irq:
-       free_irq(st->client->irq, indio_dev);
+       if (client->irq)
+               free_irq(st->client->irq, indio_dev);
 error_uninit_buffer:
        iio_buffer_unregister(indio_dev);
 error_cleanup_buffer:
        max1363_buffer_cleanup(indio_dev);
 error_free_available_scan_masks:
        kfree(indio_dev->available_scan_masks);
-error_unregister_map:
-       iio_map_array_unregister(indio_dev, client->dev.platform_data);
 error_disable_reg:
        regulator_disable(st->reg);
 error_put_reg:
        regulator_put(st->reg);
+error_unregister_map:
+       iio_map_array_unregister(indio_dev, client->dev.platform_data);
 error_free_device:
        iio_device_free(indio_dev);
 error_out:
@@ -1635,10 +1636,8 @@ static int max1363_remove(struct i2c_client *client)
        iio_buffer_unregister(indio_dev);
        max1363_buffer_cleanup(indio_dev);
        kfree(indio_dev->available_scan_masks);
-       if (!IS_ERR(st->reg)) {
-               regulator_disable(st->reg);
-               regulator_put(st->reg);
-       }
+       regulator_disable(st->reg);
+       regulator_put(st->reg);
        iio_map_array_unregister(indio_dev, client->dev.platform_data);
        iio_device_free(indio_dev);
 
index ae10778..1178121 100644 (file)
@@ -6,7 +6,7 @@ menu "Hid Sensor IIO Common"
 config HID_SENSOR_IIO_COMMON
        tristate "Common modules for all HID Sensor IIO drivers"
        depends on HID_SENSOR_HUB
-       select IIO_TRIGGER if IIO_BUFFER
+       select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
        help
          Say yes here to build support for HID sensor to use
          HID sensor common processing for attributes and IIO triggers.
@@ -14,6 +14,17 @@ config HID_SENSOR_IIO_COMMON
          HID sensor drivers, this module contains processing for those
          attributes.
 
+config HID_SENSOR_IIO_TRIGGER
+       tristate "Common module (trigger) for all HID Sensor IIO drivers"
+       depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+       select IIO_TRIGGER
+       help
+         Say yes here to build trigger support for HID sensors.
+         Triggers will be send if all requested attributes were read.
+
+         If this driver is compiled as a module, it will be named
+         hid-sensor-trigger.
+
 config HID_SENSOR_ENUM_BASE_QUIRKS
        bool "ENUM base quirks for HID Sensor IIO drivers"
        depends on HID_SENSOR_IIO_COMMON
index 1f463e0..22e7c5a 100644 (file)
@@ -3,4 +3,5 @@
 #
 
 obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
-hid-sensor-iio-common-y := hid-sensor-attributes.o hid-sensor-trigger.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
index 6c7898c..483fc37 100644 (file)
@@ -406,7 +406,11 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
                        goto error_free_reg;
                }
 
-               st->vref = regulator_get_voltage(st->vref_reg);
+               ret = regulator_get_voltage(st->vref_reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               st->vref = ret;
        } else {
                st->vref = st->chip_info->int_vref;
                ctrl |= AD5380_CTRL_INT_VREF_EN;
index 29f653d..f5583ae 100644 (file)
@@ -226,7 +226,11 @@ static int ad5446_probe(struct device *dev, const char *name,
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(reg);
+               ret = regulator_get_voltage(reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        indio_dev = iio_device_alloc(sizeof(*st));
index b2a31a0..0661829 100644 (file)
@@ -296,7 +296,11 @@ static int ad5504_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(reg);
+               ret = regulator_get_voltage(reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        spi_set_drvdata(spi, indio_dev);
index e994796..f6e1166 100644 (file)
@@ -238,7 +238,11 @@ static int ad5624r_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(st->reg);
+               ret = regulator_get_voltage(st->reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        spi_set_drvdata(spi, indio_dev);
index 36e5138..ca9609d 100644 (file)
@@ -332,7 +332,11 @@ static int ad5686_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(st->reg);
+               ret = regulator_get_voltage(st->reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        st->chip_info =
index c84180f..6407b54 100644 (file)
@@ -365,7 +365,11 @@ static int ad5791_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg_pos;
 
-               pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+               ret = regulator_get_voltage(st->reg_vdd);
+               if (ret < 0)
+                       goto error_disable_reg_pos;
+
+               pos_voltage_uv = ret;
        }
 
        st->reg_vss = regulator_get(&spi->dev, "vss");
@@ -374,7 +378,11 @@ static int ad5791_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg_neg;
 
-               neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+               ret = regulator_get_voltage(st->reg_vss);
+               if (ret < 0)
+                       goto error_disable_reg_neg;
+
+               neg_voltage_uv = ret;
        }
 
        st->pwr_down = true;
@@ -428,6 +436,7 @@ error_put_reg_neg:
        if (!IS_ERR(st->reg_vss))
                regulator_put(st->reg_vss);
 
+error_disable_reg_pos:
        if (!IS_ERR(st->reg_vdd))
                regulator_disable(st->reg_vdd);
 error_put_reg_pos:
index e5033b4..a884252 100644 (file)
@@ -173,7 +173,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
                        } while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
                } while (r_cnt == 0);
 
-               tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
+               tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
                do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
                st->r0_fract = do_div(tmp, st->r1_mod);
                st->r0_int = tmp;
index 48ed148..96b68f6 100644 (file)
@@ -17,6 +17,7 @@ config HID_SENSOR_GYRO_3D
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID Gyroscope 3D"
        help
          Say yes here to build support for the HID SENSOR
index 1763c9b..dbf80ab 100644 (file)
@@ -47,6 +47,7 @@ config HID_SENSOR_ALS
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID ALS"
        help
          Say yes here to build support for the HID SENSOR
index c1f0cdd..ff11d68 100644 (file)
@@ -8,6 +8,7 @@ config HID_SENSOR_MAGNETOMETER_3D
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID Magenetometer 3D"
        help
          Say yes here to build support for the HID SENSOR
index 576d53d..a2cf0f2 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/serial.h>
 #include <linux/tty_flip.h>
 #include <linux/slab.h>
-#include <linux/atomic.h>
 #include <linux/io.h>
 #include <linux/ipack.h>
 #include "ipoctal.h"
@@ -38,21 +37,19 @@ struct ipoctal_channel {
        spinlock_t                      lock;
        unsigned int                    pointer_read;
        unsigned int                    pointer_write;
-       atomic_t                        open;
        struct tty_port                 tty_port;
        union scc2698_channel __iomem   *regs;
        union scc2698_block __iomem     *block_regs;
        unsigned int                    board_id;
-       unsigned char                   *board_write;
        u8                              isr_rx_rdy_mask;
        u8                              isr_tx_rdy_mask;
+       unsigned int                    rx_enable;
 };
 
 struct ipoctal {
        struct ipack_device             *dev;
        unsigned int                    board_id;
        struct ipoctal_channel          channel[NR_CHANNELS];
-       unsigned char                   write;
        struct tty_driver               *tty_drv;
        u8 __iomem                      *mem8_space;
        u8 __iomem                      *int_space;
@@ -64,28 +61,23 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
 
        channel = dev_get_drvdata(tty->dev);
 
+       /*
+        * Enable RX. TX will be enabled when
+        * there is something to send
+        */
        iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+       channel->rx_enable = 1;
        return 0;
 }
 
 static int ipoctal_open(struct tty_struct *tty, struct file *file)
 {
-       int res;
        struct ipoctal_channel *channel;
 
        channel = dev_get_drvdata(tty->dev);
-
-       if (atomic_read(&channel->open))
-               return -EBUSY;
-
        tty->driver_data = channel;
 
-       res = tty_port_open(&channel->tty_port, tty, file);
-       if (res)
-               return res;
-
-       atomic_inc(&channel->open);
-       return 0;
+       return tty_port_open(&channel->tty_port, tty, file);
 }
 
 static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -111,9 +103,7 @@ static void ipoctal_close(struct tty_struct *tty, struct file *filp)
        struct ipoctal_channel *channel = tty->driver_data;
 
        tty_port_close(&channel->tty_port, tty, filp);
-
-       if (atomic_dec_and_test(&channel->open))
-               ipoctal_free_channel(channel);
+       ipoctal_free_channel(channel);
 }
 
 static int ipoctal_get_icount(struct tty_struct *tty,
@@ -137,11 +127,12 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
                           struct tty_struct *tty, u8 sr)
 {
        unsigned char value;
-       unsigned char flag = TTY_NORMAL;
+       unsigned char flag;
        u8 isr;
 
        do {
                value = ioread8(&channel->regs->r.rhr);
+               flag = TTY_NORMAL;
                /* Error: count statistics */
                if (sr & SR_ERROR) {
                        iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -183,10 +174,8 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
        unsigned char value;
        unsigned int *pointer_write = &channel->pointer_write;
 
-       if (channel->nb_bytes <= 0) {
-               channel->nb_bytes = 0;
+       if (channel->nb_bytes == 0)
                return;
-       }
 
        value = channel->tty_port.xmit_buf[*pointer_write];
        iowrite8(value, &channel->regs->w.thr);
@@ -194,15 +183,6 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
        (*pointer_write)++;
        *pointer_write = *pointer_write % PAGE_SIZE;
        channel->nb_bytes--;
-
-       if ((channel->nb_bytes == 0) &&
-           (waitqueue_active(&channel->queue))) {
-
-               if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
-                       *channel->board_write = 1;
-                       wake_up_interruptible(&channel->queue);
-               }
-       }
 }
 
 static void ipoctal_irq_channel(struct ipoctal_channel *channel)
@@ -210,27 +190,25 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
        u8 isr, sr;
        struct tty_struct *tty;
 
-       /* If there is no client, skip the check */
-       if (!atomic_read(&channel->open))
-               return;
-
        tty = tty_port_tty_get(&channel->tty_port);
        if (!tty)
                return;
+
+       spin_lock(&channel->lock);
        /* The HW is organized in pair of channels.  See which register we need
         * to read from */
        isr = ioread8(&channel->block_regs->r.isr);
        sr = ioread8(&channel->regs->r.sr);
 
-       /* In case of RS-485, change from TX to RX when finishing TX.
-        * Half-duplex. */
-       if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
-           (sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
+       if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
                iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-               iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
-               iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
-               *channel->board_write = 1;
-               wake_up_interruptible(&channel->queue);
+               /* In case of RS-485, change from TX to RX when finishing TX.
+                * Half-duplex. */
+               if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
+                       iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
+                       iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+                       channel->rx_enable = 1;
+               }
        }
 
        /* RX data */
@@ -241,8 +219,8 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
        if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
                ipoctal_irq_tx(channel);
 
-       tty_flip_buffer_push(tty);
        tty_kref_put(tty);
+       spin_unlock(&channel->lock);
 }
 
 static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -250,14 +228,14 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
        unsigned int i;
        struct ipoctal *ipoctal = (struct ipoctal *) arg;
 
-       /* Check all channels */
-       for (i = 0; i < NR_CHANNELS; i++)
-               ipoctal_irq_channel(&ipoctal->channel[i]);
-
        /* Clear the IPack device interrupt */
        readw(ipoctal->int_space + ACK_INT_REQ0);
        readw(ipoctal->int_space + ACK_INT_REQ1);
 
+       /* Check all channels */
+       for (i = 0; i < NR_CHANNELS; i++)
+               ipoctal_irq_channel(&ipoctal->channel[i]);
+
        return IRQ_HANDLED;
 }
 
@@ -311,7 +289,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        ipoctal->mem8_space =
                devm_ioremap_nocache(&ipoctal->dev->dev,
                                     region->start, 0x8000);
-       if (!addr) {
+       if (!ipoctal->mem8_space) {
                dev_err(&ipoctal->dev->dev,
                        "Unable to map slot [%d:%d] MEM8 space!\n",
                        bus_nr, slot);
@@ -324,7 +302,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                struct ipoctal_channel *channel = &ipoctal->channel[i];
                channel->regs = chan_regs + i;
                channel->block_regs = block_regs + (i >> 1);
-               channel->board_write = &ipoctal->write;
                channel->board_id = ipoctal->board_id;
                if (i & 1) {
                        channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@@ -335,6 +312,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                }
 
                iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+               channel->rx_enable = 0;
                iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
                iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
                iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@@ -407,8 +385,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 
                ipoctal_reset_stats(&channel->stats);
                channel->nb_bytes = 0;
-               init_waitqueue_head(&channel->queue);
-
                spin_lock_init(&channel->lock);
                channel->pointer_read = 0;
                channel->pointer_write = 0;
@@ -419,12 +395,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                        continue;
                }
                dev_set_drvdata(tty_dev, channel);
-
-               /*
-                * Enable again the RX. TX will be enabled when
-                * there is something to send
-                */
-               iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
        }
 
        return 0;
@@ -464,6 +434,7 @@ static int ipoctal_write_tty(struct tty_struct *tty,
        /* As the IP-OCTAL 485 only supports half duplex, do it manually */
        if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
                iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
+               channel->rx_enable = 0;
                iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
        }
 
@@ -472,10 +443,6 @@ static int ipoctal_write_tty(struct tty_struct *tty,
         * operations
         */
        iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
-       wait_event_interruptible(channel->queue, *channel->board_write);
-       iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-
-       *channel->board_write = 0;
        return char_copied;
 }
 
@@ -627,8 +594,9 @@ static void ipoctal_set_termios(struct tty_struct *tty,
        iowrite8(mr2, &channel->regs->w.mr);
        iowrite8(csr, &channel->regs->w.csr);
 
-       /* Enable again the RX */
-       iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+       /* Enable again the RX, if it was before */
+       if (channel->rx_enable)
+               iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
 }
 
 static void ipoctal_hangup(struct tty_struct *tty)
@@ -648,6 +616,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
        tty_port_hangup(&channel->tty_port);
 
        iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+       channel->rx_enable = 0;
        iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
        iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
        iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -657,6 +626,22 @@ static void ipoctal_hangup(struct tty_struct *tty)
        wake_up_interruptible(&channel->tty_port.open_wait);
 }
 
+static void ipoctal_shutdown(struct tty_struct *tty)
+{
+       struct ipoctal_channel *channel = tty->driver_data;
+
+       if (channel == NULL)
+               return;
+
+       iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+       channel->rx_enable = 0;
+       iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
+       iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
+       iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
+       iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
+       clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+}
+
 static const struct tty_operations ipoctal_fops = {
        .ioctl =                NULL,
        .open =                 ipoctal_open,
@@ -667,6 +652,7 @@ static const struct tty_operations ipoctal_fops = {
        .chars_in_buffer =      ipoctal_chars_in_buffer,
        .get_icount =           ipoctal_get_icount,
        .hangup =               ipoctal_hangup,
+       .shutdown =             ipoctal_shutdown,
 };
 
 static int ipoctal_probe(struct ipack_device *dev)
index 8a8d42f..d4e7567 100644 (file)
@@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        mutex_lock(&info->lock);
 
        format = __find_format(info, fh, fmt->which, info->res_type);
-       if (!format)
+       if (format)
                fmt->format = *format;
        else
                ret = -EINVAL;
index 1cf8293..4a980e0 100644 (file)
@@ -23,8 +23,8 @@
 #include <linux/slab.h>
 #include <linux/videodev2.h>
 #include <linux/of.h>
+#include <linux/platform_data/imx-iram.h>
 
-#include <mach/iram.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
index e0d73a6..8dac175 100644 (file)
@@ -35,9 +35,6 @@
 #include <linux/vmalloc.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-#include <plat/omap-pm.h>
 
 #include "ispvideo.h"
 #include "isp.h"
index 4ab99f3..b4a68ec 100644 (file)
@@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
 {
        struct media_entity *source, *sink;
        unsigned int flags = MEDIA_LNK_FL_ENABLED;
-       int i, ret;
+       int i, ret = 0;
 
        for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
                struct fimc_lite *fimc = fmd->fimc_lite[i];
index 379f574..681bc6b 100644 (file)
@@ -412,62 +412,48 @@ leave_handle_frame:
 }
 
 /* Error handling for interrupt */
-static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
-                                unsigned int reason, unsigned int err)
+static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
+               struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
 {
-       struct s5p_mfc_dev *dev;
        unsigned long flags;
 
-       /* If no context is available then all necessary
-        * processing has been done. */
-       if (ctx == NULL)
-               return;
-
-       dev = ctx->dev;
        mfc_err("Interrupt Error: %08x\n", err);
-       s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-       wake_up_dev(dev, reason, err);
 
-       /* Error recovery is dependent on the state of context */
-       switch (ctx->state) {
-       case MFCINST_INIT:
-               /* This error had to happen while acquireing instance */
-       case MFCINST_GOT_INST:
-               /* This error had to happen while parsing the header */
-       case MFCINST_HEAD_PARSED:
-               /* This error had to happen while setting dst buffers */
-       case MFCINST_RETURN_INST:
-               /* This error had to happen while releasing instance */
-               clear_work_bit(ctx);
-               wake_up_ctx(ctx, reason, err);
-               if (test_and_clear_bit(0, &dev->hw_lock) == 0)
-                       BUG();
-               s5p_mfc_clock_off();
-               ctx->state = MFCINST_ERROR;
-               break;
-       case MFCINST_FINISHING:
-       case MFCINST_FINISHED:
-       case MFCINST_RUNNING:
-               /* It is higly probable that an error occured
-                * while decoding a frame */
-               clear_work_bit(ctx);
-               ctx->state = MFCINST_ERROR;
-               /* Mark all dst buffers as having an error */
-               spin_lock_irqsave(&dev->irqlock, flags);
-               s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
-                               &ctx->vq_dst);
-               /* Mark all src buffers as having an error */
-               s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
-                               &ctx->vq_src);
-               spin_unlock_irqrestore(&dev->irqlock, flags);
-               if (test_and_clear_bit(0, &dev->hw_lock) == 0)
-                       BUG();
-               s5p_mfc_clock_off();
-               break;
-       default:
-               mfc_err("Encountered an error interrupt which had not been handled\n");
-               break;
+       if (ctx != NULL) {
+               /* Error recovery is dependent on the state of context */
+               switch (ctx->state) {
+               case MFCINST_RES_CHANGE_INIT:
+               case MFCINST_RES_CHANGE_FLUSH:
+               case MFCINST_RES_CHANGE_END:
+               case MFCINST_FINISHING:
+               case MFCINST_FINISHED:
+               case MFCINST_RUNNING:
+                       /* It is higly probable that an error occured
+                        * while decoding a frame */
+                       clear_work_bit(ctx);
+                       ctx->state = MFCINST_ERROR;
+                       /* Mark all dst buffers as having an error */
+                       spin_lock_irqsave(&dev->irqlock, flags);
+                       s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+                                               &ctx->dst_queue, &ctx->vq_dst);
+                       /* Mark all src buffers as having an error */
+                       s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+                                               &ctx->src_queue, &ctx->vq_src);
+                       spin_unlock_irqrestore(&dev->irqlock, flags);
+                       wake_up_ctx(ctx, reason, err);
+                       break;
+               default:
+                       clear_work_bit(ctx);
+                       ctx->state = MFCINST_ERROR;
+                       wake_up_ctx(ctx, reason, err);
+                       break;
+               }
        }
+       if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+               BUG();
+       s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+       s5p_mfc_clock_off();
+       wake_up_dev(dev, reason, err);
        return;
 }
 
@@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
                                dev->warn_start)
                        s5p_mfc_handle_frame(ctx, reason, err);
                else
-                       s5p_mfc_handle_error(ctx, reason, err);
+                       s5p_mfc_handle_error(dev, ctx, reason, err);
                clear_bit(0, &dev->enter_suspend);
                break;
 
index 40ad668..3773a8a 100644 (file)
@@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = {
 /* -- module initialisation -- */
 static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x045e, 0x02ae)},
+       {USB_DEVICE(0x045e, 0x02bf)},
        {}
 };
 
index 70511d5..1220340 100644 (file)
@@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
        }
 }
 
-static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
+static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
 {
        int retry = 60;
 
@@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
                return;
 
        /* is i2c ready */
-       reg_w(gspca_dev, 0x08, buffer, 8);
+       reg_w(gspca_dev, 0x08, buf, 8);
        while (retry--) {
                if (gspca_dev->usb_err < 0)
                        return;
-               msleep(10);
+               msleep(1);
                reg_r(gspca_dev, 0x08);
                if (gspca_dev->usb_buf[0] & 0x04) {
                        if (gspca_dev->usb_buf[0] & 0x08) {
                                dev_err(gspca_dev->v4l2_dev.dev,
-                                       "i2c write error\n");
+                                       "i2c error writing %02x %02x %02x %02x"
+                                       " %02x %02x %02x %02x\n",
+                                       buf[0], buf[1], buf[2], buf[3],
+                                       buf[4], buf[5], buf[6], buf[7]);
                                gspca_dev->usb_err = -EIO;
                        }
                        return;
@@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
        for (;;) {
                if (gspca_dev->usb_err < 0)
                        return;
-               reg_w(gspca_dev, 0x08, *buffer, 8);
+               i2c_w(gspca_dev, *buffer);
                len -= 8;
                if (len <= 0)
                        break;
index 5a86047..36307a9 100644 (file)
@@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
                        0,
                        gspca_dev->usb_buf, 8,
                        500);
+       msleep(2);
        if (ret < 0) {
                pr_err("i2c_w1 err %d\n", ret);
                gspca_dev->usb_err = ret;
index 2bb7613..d5baab1 100644 (file)
@@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
        int ret;
 
        ctrl = uvc_find_control(chain, xctrl->id, &mapping);
-       if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0)
+       if (ctrl == NULL)
                return -EINVAL;
+       if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+               return -EACCES;
 
        /* Clamp out of range values. */
        switch (mapping->v4l2_type) {
index f2ee8c6..68d59b5 100644 (file)
@@ -657,8 +657,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
                        ret = uvc_ctrl_get(chain, ctrl);
                        if (ret < 0) {
                                uvc_ctrl_rollback(handle);
-                               ctrls->error_idx = ret == -ENOENT
-                                                ? ctrls->count : i;
+                               ctrls->error_idx = i;
                                return ret;
                        }
                }
@@ -686,8 +685,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
                        ret = uvc_ctrl_set(chain, ctrl);
                        if (ret < 0) {
                                uvc_ctrl_rollback(handle);
-                               ctrls->error_idx = (ret == -ENOENT &&
-                                                   cmd == VIDIOC_S_EXT_CTRLS)
+                               ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS
                                                 ? ctrls->count : i;
                                return ret;
                        }
index 9f81be2..e02c479 100644 (file)
@@ -921,8 +921,10 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
                 * In videobuf we use our internal V4l2_planes struct for
                 * single-planar buffers as well, for simplicity.
                 */
-               if (V4L2_TYPE_IS_OUTPUT(b->type))
+               if (V4L2_TYPE_IS_OUTPUT(b->type)) {
                        v4l2_planes[0].bytesused = b->bytesused;
+                       v4l2_planes[0].data_offset = 0;
+               }
 
                if (b->memory == V4L2_MEMORY_USERPTR) {
                        v4l2_planes[0].m.userptr = b->m.userptr;
index 1c0abd4..47ad4e2 100644 (file)
@@ -292,6 +292,7 @@ config TWL4030_CORE
        bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
        depends on I2C=y && GENERIC_HARDIRQS
        select IRQ_DOMAIN
+       select REGMAP_I2C
        help
          Say yes here if you have TWL4030 / TWL6030 family chip on your board.
          This core driver provides register access and IRQ handling
index e5d8f63..77048b1 100644 (file)
@@ -313,19 +313,11 @@ static void vexpress_sysreg_config_complete(unsigned long data)
 }
 
 
-void __init vexpress_sysreg_early_init(void __iomem *base)
+void __init vexpress_sysreg_setup(struct device_node *node)
 {
-       struct device_node *node = of_find_compatible_node(NULL, NULL,
-                       "arm,vexpress-sysreg");
-
-       if (node)
-               base = of_iomap(node, 0);
-
-       if (WARN_ON(!base))
+       if (WARN_ON(!vexpress_sysreg_base))
                return;
 
-       vexpress_sysreg_base = base;
-
        if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
                vexpress_master_site = VEXPRESS_SITE_DB2;
        else
@@ -336,9 +328,23 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
        WARN_ON(!vexpress_sysreg_config_bridge);
 }
 
+void __init vexpress_sysreg_early_init(void __iomem *base)
+{
+       vexpress_sysreg_base = base;
+       vexpress_sysreg_setup(NULL);
+}
+
 void __init vexpress_sysreg_of_early_init(void)
 {
-       vexpress_sysreg_early_init(NULL);
+       struct device_node *node = of_find_compatible_node(NULL, NULL,
+                       "arm,vexpress-sysreg");
+
+       if (node) {
+               vexpress_sysreg_base = of_iomap(node, 0);
+               vexpress_sysreg_setup(node);
+       } else {
+               pr_info("vexpress-sysreg: No Device Tree node found.");
+       }
 }
 
 
@@ -426,9 +432,11 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
                return -EBUSY;
        }
 
-       if (!vexpress_sysreg_base)
+       if (!vexpress_sysreg_base) {
                vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
                                resource_size(res));
+               vexpress_sysreg_setup(pdev->dev.of_node);
+       }
 
        if (!vexpress_sysreg_base) {
                dev_err(&pdev->dev, "Failed to obtain base address!\n");
index b151b7c..3ec3f9e 100644 (file)
@@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480
          stereo and mono audio, video, microphone and UART data to use
          a common connector port.
 
+config LATTICE_ECP3_CONFIG
+       tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
+       depends on SPI && SYSFS
+       select FW_LOADER
+       default n
+       help
+         This option enables support for bitstream configuration (programming
+         or loading) of the Lattice ECP3 FPGA family via SPI.
+
+         If unsure, say N.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
 source "drivers/misc/carma/Kconfig"
 source "drivers/misc/altera-stapl/Kconfig"
 source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vmw_vmci/Kconfig"
 endmenu
index 2129377..35a1463 100644 (file)
@@ -49,3 +49,6 @@ obj-y                         += carma/
 obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
 obj-$(CONFIG_ALTERA_STAPL)     +=altera-stapl/
 obj-$(CONFIG_INTEL_MEI)                += mei/
+obj-$(CONFIG_MAX8997_MUIC)     += max8997-muic.o
+obj-$(CONFIG_VMWARE_VMCI)      += vmw_vmci/
+obj-$(CONFIG_LATTICE_ECP3_CONFIG)      += lattice-ecp3-config.o
index 158da5a..3c09cbb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 
 #include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
 
 /* Serialize access to ssc_list and user count */
 static DEFINE_SPINLOCK(user_lock);
@@ -131,6 +132,13 @@ static int ssc_probe(struct platform_device *pdev)
        struct resource *regs;
        struct ssc_device *ssc;
        const struct atmel_ssc_platform_data *plat_dat;
+       struct pinctrl *pinctrl;
+
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               dev_err(&pdev->dev, "Failed to request pinctrl\n");
+               return PTR_ERR(pinctrl);
+       }
 
        ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
        if (!ssc) {
index 22429b8..5acb9c5 100644 (file)
@@ -1,6 +1,6 @@
 config CB710_CORE
        tristate "ENE CB710/720 Flash memory card reader support"
-       depends on PCI
+       depends on PCI && GENERIC_HARDIRQS
        help
          This option enables support for PCI ENE CB710/720 Flash memory card
          reader found in some laptops (ie. some versions of HP Compaq nx9500).
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
new file mode 100644 (file)
index 0000000..155700b
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012 Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define FIRMWARE_NAME  "lattice-ecp3.bit"
+
+/*
+ * The JTAG ID's of the supported FPGA's. The ID is 32bit wide
+ * reversed as noted in the manual.
+ */
+#define ID_ECP3_17     0xc2088080
+#define ID_ECP3_35     0xc2048080
+
+/* FPGA commands */
+#define FPGA_CMD_READ_ID       0x07    /* plus 24 bits */
+#define FPGA_CMD_READ_STATUS   0x09    /* plus 24 bits */
+#define FPGA_CMD_CLEAR         0x70
+#define FPGA_CMD_REFRESH       0x71
+#define FPGA_CMD_WRITE_EN      0x4a    /* plus 2 bits */
+#define FPGA_CMD_WRITE_DIS     0x4f    /* plus 8 bits */
+#define FPGA_CMD_WRITE_INC     0x41    /* plus 0 bits */
+
+/*
+ * The status register is 32bit revered, DONE is bit 17 from the TN1222.pdf
+ * (LatticeECP3 Slave SPI Port User's Guide)
+ */
+#define FPGA_STATUS_DONE       0x00004000
+#define FPGA_STATUS_CLEARED    0x00010000
+
+#define FPGA_CLEAR_TIMEOUT     5000    /* max. 5000ms for FPGA clear */
+#define FPGA_CLEAR_MSLEEP      10
+#define FPGA_CLEAR_LOOP_COUNT  (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
+
+struct fpga_data {
+       struct completion fw_loaded;
+};
+
+struct ecp3_dev {
+       u32 jedec_id;
+       char *name;
+};
+
+static const struct ecp3_dev ecp3_dev[] = {
+       {
+               .jedec_id = ID_ECP3_17,
+               .name = "Lattice ECP3-17",
+       },
+       {
+               .jedec_id = ID_ECP3_35,
+               .name = "Lattice ECP3-35",
+       },
+};
+
+static void firmware_load(const struct firmware *fw, void *context)
+{
+       struct spi_device *spi = (struct spi_device *)context;
+       struct fpga_data *data = dev_get_drvdata(&spi->dev);
+       u8 *buffer;
+       int ret;
+       u8 txbuf[8];
+       u8 rxbuf[8];
+       int rx_len = 8;
+       int i;
+       u32 jedec_id;
+       u32 status;
+
+       if (fw->size == 0) {
+               dev_err(&spi->dev, "Error: Firmware size is 0!\n");
+               return;
+       }
+
+       /* Fill dummy data (24 stuffing bits for commands) */
+       txbuf[1] = 0x00;
+       txbuf[2] = 0x00;
+       txbuf[3] = 0x00;
+
+       /* Trying to speak with the FPGA via SPI... */
+       txbuf[0] = FPGA_CMD_READ_ID;
+       ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+       dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
+       jedec_id = *(u32 *)&rxbuf[4];
+
+       for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
+               if (jedec_id == ecp3_dev[i].jedec_id)
+                       break;
+       }
+       if (i == ARRAY_SIZE(ecp3_dev)) {
+               dev_err(&spi->dev,
+                       "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
+                       jedec_id);
+               return;
+       }
+
+       dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
+
+       txbuf[0] = FPGA_CMD_READ_STATUS;
+       ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+       dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+
+       buffer = kzalloc(fw->size + 8, GFP_KERNEL);
+       if (!buffer) {
+               dev_err(&spi->dev, "Error: Can't allocate memory!\n");
+               return;
+       }
+
+       /*
+        * Insert WRITE_INC command into stream (one SPI frame)
+        */
+       buffer[0] = FPGA_CMD_WRITE_INC;
+       buffer[1] = 0xff;
+       buffer[2] = 0xff;
+       buffer[3] = 0xff;
+       memcpy(buffer + 4, fw->data, fw->size);
+
+       txbuf[0] = FPGA_CMD_REFRESH;
+       ret = spi_write(spi, txbuf, 4);
+
+       txbuf[0] = FPGA_CMD_WRITE_EN;
+       ret = spi_write(spi, txbuf, 4);
+
+       txbuf[0] = FPGA_CMD_CLEAR;
+       ret = spi_write(spi, txbuf, 4);
+
+       /*
+        * Wait for FPGA memory to become cleared
+        */
+       for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
+               txbuf[0] = FPGA_CMD_READ_STATUS;
+               ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+               status = *(u32 *)&rxbuf[4];
+               if (status == FPGA_STATUS_CLEARED)
+                       break;
+
+               msleep(FPGA_CLEAR_MSLEEP);
+       }
+
+       if (i == FPGA_CLEAR_LOOP_COUNT) {
+               dev_err(&spi->dev,
+                       "Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
+                       status);
+               kfree(buffer);
+               return;
+       }
+
+       dev_info(&spi->dev, "Configuring the FPGA...\n");
+       ret = spi_write(spi, buffer, fw->size + 8);
+
+       txbuf[0] = FPGA_CMD_WRITE_DIS;
+       ret = spi_write(spi, txbuf, 4);
+
+       txbuf[0] = FPGA_CMD_READ_STATUS;
+       ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+       dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+       status = *(u32 *)&rxbuf[4];
+
+       /* Check result */
+       if (status & FPGA_STATUS_DONE)
+               dev_info(&spi->dev, "FPGA succesfully configured!\n");
+       else
+               dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
+
+       /*
+        * Don't forget to release the firmware again
+        */
+       release_firmware(fw);
+
+       kfree(buffer);
+
+       complete(&data->fw_loaded);
+}
+
+static int lattice_ecp3_probe(struct spi_device *spi)
+{
+       struct fpga_data *data;
+       int err;
+
+       data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
+               return -ENOMEM;
+       }
+       spi_set_drvdata(spi, data);
+
+       init_completion(&data->fw_loaded);
+       err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+                                     FIRMWARE_NAME, &spi->dev,
+                                     GFP_KERNEL, spi, firmware_load);
+       if (err) {
+               dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
+               return err;
+       }
+
+       dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
+
+       return 0;
+}
+
+static int lattice_ecp3_remove(struct spi_device *spi)
+{
+       struct fpga_data *data = spi_get_drvdata(spi);
+
+       wait_for_completion(&data->fw_loaded);
+
+       return 0;
+}
+
+static const struct spi_device_id lattice_ecp3_id[] = {
+       { "ecp3-17", 0 },
+       { "ecp3-35", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
+
+static struct spi_driver lattice_ecp3_driver = {
+       .driver = {
+               .name = "lattice-ecp3",
+               .owner = THIS_MODULE,
+       },
+       .probe = lattice_ecp3_probe,
+       .remove = lattice_ecp3_remove,
+       .id_table = lattice_ecp3_id,
+};
+
+module_spi_driver(lattice_ecp3_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
+MODULE_LICENSE("GPL");
index 5a79ccd..d21b4d0 100644 (file)
@@ -1,11 +1,22 @@
 config INTEL_MEI
-       tristate "Intel Management Engine Interface (Intel MEI)"
+       tristate "Intel Management Engine Interface"
        depends on X86 && PCI && WATCHDOG_CORE
        help
          The Intel Management Engine (Intel ME) provides Manageability,
          Security and Media services for system containing Intel chipsets.
          if selected /dev/mei misc device will be created.
 
+         For more information see
+         <http://software.intel.com/en-us/manageability/>
+
+config INTEL_MEI_ME
+       bool "ME Enabled Intel Chipsets"
+       depends on INTEL_MEI
+       depends on X86 && PCI && WATCHDOG_CORE
+       default y
+       help
+         MEI support for ME Enabled Intel chipsets.
+
          Supported Chipsets are:
          7 Series Chipset Family
          6 Series Chipset Family
@@ -24,5 +35,3 @@ config INTEL_MEI
          82Q33 Express
          82X38/X48 Express
 
-         For more information see
-         <http://software.intel.com/en-us/manageability/>
index 0017842..040af6c 100644 (file)
@@ -4,9 +4,11 @@
 #
 obj-$(CONFIG_INTEL_MEI) += mei.o
 mei-objs := init.o
+mei-objs += hbm.o
 mei-objs += interrupt.o
-mei-objs += interface.o
-mei-objs += iorw.o
+mei-objs += client.o
 mei-objs += main.o
 mei-objs += amthif.o
 mei-objs += wd.o
+mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
+mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o
index 18794ae..c86d7e3 100644 (file)
 #include <linux/jiffies.h>
 #include <linux/uaccess.h>
 
+#include <linux/mei.h>
 
 #include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
 
-const uuid_le mei_amthi_guid  = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
-                                               0xa8, 0x46, 0xe0, 0xff, 0x65,
-                                               0x81, 0x4c);
+const uuid_le mei_amthif_guid  = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
+                                        0xac, 0xa8, 0x46, 0xe0,
+                                        0xff, 0x65, 0x81, 0x4c);
 
 /**
  * mei_amthif_reset_params - initializes mei device iamthif
@@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev)
  * @dev: the device structure
  *
  */
-void mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev)
 {
-       int i;
+       struct mei_cl *cl = &dev->iamthif_cl;
        unsigned char *msg_buf;
+       int ret, i;
 
-       mei_cl_init(&dev->iamthif_cl, dev);
-       dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+       dev->iamthif_state = MEI_IAMTHIF_IDLE;
+
+       mei_cl_init(cl, dev);
 
-       /* find ME amthi client */
-       i = mei_me_cl_link(dev, &dev->iamthif_cl,
-                           &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
+       i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
        if (i < 0) {
-               dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
-               return;
+               dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
+               return -ENOENT;
        }
 
+       cl->me_client_id = dev->me_clients[i].client_id;
+
        /* Assign iamthif_mtu to the value received from ME  */
 
        dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev)
        msg_buf = kcalloc(dev->iamthif_mtu,
                        sizeof(unsigned char), GFP_KERNEL);
        if (!msg_buf) {
-               dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
-               return;
+               dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
+               return -ENOMEM;
        }
 
        dev->iamthif_msg_buf = msg_buf;
 
-       if (mei_connect(dev, &dev->iamthif_cl)) {
-               dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
-               dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
-               dev->iamthif_cl.host_client_id = 0;
+       ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+
+       if (ret < 0) {
+               dev_err(&dev->pdev->dev, "amthif: failed link client\n");
+               return -ENOENT;
+       }
+
+       cl->state = MEI_FILE_CONNECTING;
+
+       if (mei_hbm_cl_connect_req(dev, cl)) {
+               dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
+               cl->state = MEI_FILE_DISCONNECTED;
+               cl->host_client_id = 0;
        } else {
-               dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
+               cl->timer_count = MEI_CONNECT_TIMEOUT;
        }
+       return 0;
 }
 
 /**
@@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
        i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
 
        if (i < 0) {
-               dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
+               dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
                return -ENODEV;
        }
-       dev_dbg(&dev->pdev->dev, "checking amthi data\n");
+       dev_dbg(&dev->pdev->dev, "checking amthif data\n");
        cb = mei_amthif_find_read_list_entry(dev, file);
 
        /* Check for if we can block or not*/
@@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                return -EAGAIN;
 
 
-       dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
+       dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
        while (cb == NULL) {
                /* unlock the Mutex */
                mutex_unlock(&dev->device_lock);
@@ -187,27 +200,27 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
                        (cb = mei_amthif_find_read_list_entry(dev, file)));
 
+               /* Locking again the Mutex */
+               mutex_lock(&dev->device_lock);
+
                if (wait_ret)
                        return -ERESTARTSYS;
 
                dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
-               /* Locking again the Mutex */
-               mutex_lock(&dev->device_lock);
        }
 
 
-       dev_dbg(&dev->pdev->dev, "Got amthi data\n");
+       dev_dbg(&dev->pdev->dev, "Got amthif data\n");
        dev->iamthif_timer = 0;
 
        if (cb) {
                timeout = cb->read_time +
                        mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-               dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
+               dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
                                timeout);
 
                if  (time_after(jiffies, timeout)) {
-                       dev_dbg(&dev->pdev->dev, "amthi Time out\n");
+                       dev_dbg(&dev->pdev->dev, "amthif Time out\n");
                        /* 15 sec for the message has expired */
                        list_del(&cb->list);
                        rets = -ETIMEDOUT;
@@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                 * remove message from deletion list
                 */
 
-       dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
+       dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
            cb->response_buffer.size);
-       dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
+       dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
 
        /* length is being turncated to PAGE_SIZE, however,
         * the buf_idx may point beyond */
@@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                }
        }
 free:
-       dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
+       dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
        *offset = 0;
        mei_io_cb_free(cb);
 out:
@@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
        if (!dev || !cb)
                return -ENODEV;
 
-       dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
+       dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
 
        dev->iamthif_state = MEI_IAMTHIF_WRITING;
        dev->iamthif_current_cb = cb;
@@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
        memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
               cb->request_buffer.size);
 
-       ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
+       ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
        if (ret < 0)
                return ret;
 
-       if (ret && dev->mei_host_buffer_is_empty) {
+       if (ret && dev->hbuf_is_ready) {
                ret = 0;
-               dev->mei_host_buffer_is_empty = false;
-               if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
-                       mei_hdr.length = mei_hbuf_max_data(dev);
+               dev->hbuf_is_ready = false;
+               if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
+                       mei_hdr.length = mei_hbuf_max_len(dev);
                        mei_hdr.msg_complete = 0;
                } else {
                        mei_hdr.length = cb->request_buffer.size;
@@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
                mei_hdr.reserved = 0;
                dev->iamthif_msg_buf_index += mei_hdr.length;
                if (mei_write_message(dev, &mei_hdr,
-                                       (unsigned char *)(dev->iamthif_msg_buf),
-                                       mei_hdr.length))
+                                       (unsigned char *)dev->iamthif_msg_buf))
                        return -ENODEV;
 
                if (mei_hdr.msg_complete) {
-                       if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
+                       if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
                                return -ENODEV;
                        dev->iamthif_flow_control_pending = true;
                        dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
-                       dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
+                       dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
                        dev->iamthif_current_cb = cb;
                        dev->iamthif_file_object = cb->file_object;
                        list_add_tail(&cb->list, &dev->write_waiting_list.list);
                } else {
-                       dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
+                       dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
                        list_add_tail(&cb->list, &dev->write_list.list);
                }
        } else {
-               if (!(dev->mei_host_buffer_is_empty))
+               if (!dev->hbuf_is_ready)
                        dev_dbg(&dev->pdev->dev, "host buffer is not empty");
 
                dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
        dev->iamthif_timer = 0;
        dev->iamthif_file_object = NULL;
 
-       dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+       dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
 
        list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
                list_del(&pos->list);
@@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
                        status = mei_amthif_send_cmd(dev, pos);
                        if (status) {
                                dev_dbg(&dev->pdev->dev,
-                                       "amthi write failed status = %d\n",
+                                       "amthif write failed status = %d\n",
                                                status);
                                return;
                        }
@@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
        if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
                dev->iamthif_file_object == file) {
                mask |= (POLLIN | POLLRDNORM);
-               dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
+               dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
                mei_amthif_run_next_cmd(dev);
        }
        return mask;
@@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
 int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
                        struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
 {
-       struct mei_msg_hdr *mei_hdr;
+       struct mei_msg_hdr mei_hdr;
        struct mei_cl *cl = cb->cl;
        size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
        size_t msg_slots = mei_data2slots(len);
 
-       mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
-       mei_hdr->host_addr = cl->host_client_id;
-       mei_hdr->me_addr = cl->me_client_id;
-       mei_hdr->reserved = 0;
+       mei_hdr.host_addr = cl->host_client_id;
+       mei_hdr.me_addr = cl->me_client_id;
+       mei_hdr.reserved = 0;
 
        if (*slots >= msg_slots) {
-               mei_hdr->length = len;
-               mei_hdr->msg_complete = 1;
+               mei_hdr.length = len;
+               mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (*slots == dev->hbuf_depth) {
                msg_slots = *slots;
                len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
-               mei_hdr->length = len;
-               mei_hdr->msg_complete = 0;
+               mei_hdr.length = len;
+               mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
        }
 
-       dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
-                       mei_hdr->length, mei_hdr->msg_complete);
+       dev_dbg(&dev->pdev->dev, MEI_HDR_FMT,  MEI_HDR_PRM(&mei_hdr));
 
        *slots -=  msg_slots;
-       if (mei_write_message(dev, mei_hdr,
-               dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
-               mei_hdr->length)) {
+       if (mei_write_message(dev, &mei_hdr,
+               dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;
                        cl->status = -ENODEV;
                        list_del(&cb->list);
                        return -ENODEV;
        }
 
-       if (mei_flow_ctrl_reduce(dev, cl))
+       if (mei_cl_flow_ctrl_reduce(cl))
                return -ENODEV;
 
-       dev->iamthif_msg_buf_index += mei_hdr->length;
+       dev->iamthif_msg_buf_index += mei_hdr.length;
        cl->status = 0;
 
-       if (mei_hdr->msg_complete) {
+       if (mei_hdr.msg_complete) {
                dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
                dev->iamthif_flow_control_pending = true;
 
-               /* save iamthif cb sent to amthi client */
+               /* save iamthif cb sent to amthif client */
                cb->buf_idx = dev->iamthif_msg_buf_index;
                dev->iamthif_current_cb = cb;
 
@@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
 
 /**
  * mei_amthif_irq_read_message - read routine after ISR to
- *                     handle the read amthi message
+ *                     handle the read amthif message
  *
  * @complete_list: An instance of our list structure
  * @dev: the device structure
- * @mei_hdr: header of amthi message
+ * @mei_hdr: header of amthif message
  *
  * returns 0 on success, <0 on failure.
  */
@@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
                return 0;
 
        dev_dbg(&dev->pdev->dev,
-                       "amthi_message_buffer_index =%d\n",
+                       "amthif_message_buffer_index =%d\n",
                        mei_hdr->length);
 
-       dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
+       dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
        if (!dev->iamthif_current_cb)
                return -ENODEV;
 
@@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
        cb->read_time = jiffies;
        if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
                /* found the iamthif cb */
-               dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
-               dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
+               dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
+               dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
                list_add_tail(&cb->list, &complete_list->list);
        }
        return 0;
@@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
                return -EMSGSIZE;
        }
        *slots -= mei_data2slots(sizeof(struct hbm_flow_control));
-       if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+       if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
                dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
                return -EIO;
        }
@@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
        dev->iamthif_msg_buf_index = 0;
        dev->iamthif_msg_buf_size = 0;
        dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
-       dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
+       dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        return 0;
 }
 
@@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
                                dev->iamthif_msg_buf,
                                dev->iamthif_msg_buf_index);
                list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
-               dev_dbg(&dev->pdev->dev, "amthi read completed\n");
+               dev_dbg(&dev->pdev->dev, "amthif read completed\n");
                dev->iamthif_timer = jiffies;
                dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
                                dev->iamthif_timer);
@@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
                mei_amthif_run_next_cmd(dev);
        }
 
-       dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
+       dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
        wake_up_interruptible(&dev->iamthif_cl.wait);
 }
 
@@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev,
                        if (dev->iamthif_current_cb == cb_pos) {
                                dev->iamthif_current_cb = NULL;
                                /* send flow control to iamthif client */
-                               mei_send_flow_control(dev, &dev->iamthif_cl);
+                               mei_hbm_cl_flow_control_req(dev,
+                                                       &dev->iamthif_cl);
                        }
                        /* free all allocated buffers */
                        mei_io_cb_free(cb_pos);
@@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
        if (dev->iamthif_file_object == file &&
            dev->iamthif_state != MEI_IAMTHIF_IDLE) {
 
-               dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
+               dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
                    dev->iamthif_state);
                dev->iamthif_canceled = true;
                if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
-                       dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
+                       dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
                        mei_amthif_run_next_cmd(dev);
                }
        }
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
new file mode 100644 (file)
index 0000000..1569afe
--- /dev/null
@@ -0,0 +1,729 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * mei_me_cl_by_uuid - locate index of me client
+ *
+ * @dev: mei device
+ * returns me client index or -ENOENT if not found
+ */
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
+{
+       int i, res = -ENOENT;
+
+       for (i = 0; i < dev->me_clients_num; ++i)
+               if (uuid_le_cmp(*uuid,
+                               dev->me_clients[i].props.protocol_name) == 0) {
+                       res = i;
+                       break;
+               }
+
+       return res;
+}
+
+
+/**
+ * mei_me_cl_by_id return index to me_clients for client_id
+ *
+ * @dev: the device structure
+ * @client_id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+{
+       int i;
+       for (i = 0; i < dev->me_clients_num; i++)
+               if (dev->me_clients[i].client_id == client_id)
+                       break;
+       if (WARN_ON(dev->me_clients[i].client_id != client_id))
+               return -ENOENT;
+
+       if (i == dev->me_clients_num)
+               return -ENOENT;
+
+       return i;
+}
+
+
+/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list:  An instance of our list structure
+ * @cl: host client
+ */
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+       struct mei_cl_cb *cb;
+       struct mei_cl_cb *next;
+
+       list_for_each_entry_safe(cb, next, &list->list, list) {
+               if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+                       list_del(&cb->list);
+       }
+}
+
+/**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+       if (cb == NULL)
+               return;
+
+       kfree(cb->request_buffer.data);
+       kfree(cb->response_buffer.data);
+       kfree(cb);
+}
+
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl - mei client
+ * @file: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+       struct mei_cl_cb *cb;
+
+       cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+       if (!cb)
+               return NULL;
+
+       mei_io_list_init(cb);
+
+       cb->file_object = fp;
+       cb->cl = cl;
+       cb->buf_idx = 0;
+       return cb;
+}
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb -  io callback structure
+ * @size: size of the buffer
+ *
+ * returns 0 on success
+ *         -EINVAL if cb is NULL
+ *         -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+       if (!cb)
+               return -EINVAL;
+
+       if (length == 0)
+               return 0;
+
+       cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+       if (!cb->request_buffer.data)
+               return -ENOMEM;
+       cb->request_buffer.size = length;
+       return 0;
+}
+/**
+ * mei_io_cb_alloc_req_buf - allocate respose buffer
+ *
+ * @cb -  io callback structure
+ * @size: size of the buffer
+ *
+ * returns 0 on success
+ *         -EINVAL if cb is NULL
+ *         -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+       if (!cb)
+               return -EINVAL;
+
+       if (length == 0)
+               return 0;
+
+       cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+       if (!cb->response_buffer.data)
+               return -ENOMEM;
+       cb->response_buffer.size = length;
+       return 0;
+}
+
+
+
+/**
+ * mei_cl_flush_queues - flushes queue lists belonging to cl.
+ *
+ * @dev: the device structure
+ * @cl: host client
+ */
+int mei_cl_flush_queues(struct mei_cl *cl)
+{
+       if (WARN_ON(!cl || !cl->dev))
+               return -EINVAL;
+
+       dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+       mei_io_list_flush(&cl->dev->read_list, cl);
+       mei_io_list_flush(&cl->dev->write_list, cl);
+       mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+       mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+       mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+       mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+       mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
+       return 0;
+}
+
+
+/**
+ * mei_cl_init - initializes intialize cl.
+ *
+ * @cl: host client to be initialized
+ * @dev: mei device
+ */
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
+{
+       memset(cl, 0, sizeof(struct mei_cl));
+       init_waitqueue_head(&cl->wait);
+       init_waitqueue_head(&cl->rx_wait);
+       init_waitqueue_head(&cl->tx_wait);
+       INIT_LIST_HEAD(&cl->link);
+       cl->reading_state = MEI_IDLE;
+       cl->writing_state = MEI_IDLE;
+       cl->dev = dev;
+}
+
+/**
+ * mei_cl_allocate - allocates cl  structure and sets it up.
+ *
+ * @dev: mei device
+ * returns  The allocated file or NULL on failure
+ */
+struct mei_cl *mei_cl_allocate(struct mei_device *dev)
+{
+       struct mei_cl *cl;
+
+       cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+       if (!cl)
+               return NULL;
+
+       mei_cl_init(cl, dev);
+
+       return cl;
+}
+
+/**
+ * mei_cl_find_read_cb - find this cl's callback in the read list
+ *
+ * @dev: device structure
+ * returns cb on success, NULL on error
+ */
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
+{
+       struct mei_device *dev = cl->dev;
+       struct mei_cl_cb *cb = NULL;
+       struct mei_cl_cb *next = NULL;
+
+       list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+               if (mei_cl_cmp_id(cl, cb->cl))
+                       return cb;
+       return NULL;
+}
+
+/** mei_cl_link: allocte host id in the host map
+ *
+ * @cl - host client
+ * @id - fixed host id or -1 for genereting one
+ * returns 0 on success
+ *     -EINVAL on incorrect values
+ *     -ENONET if client not found
+ */
+int mei_cl_link(struct mei_cl *cl, int id)
+{
+       struct mei_device *dev;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return -EINVAL;
+
+       dev = cl->dev;
+
+       /* If Id is not asigned get one*/
+       if (id == MEI_HOST_CLIENT_ID_ANY)
+               id = find_first_zero_bit(dev->host_clients_map,
+                                       MEI_CLIENTS_MAX);
+
+       if (id >= MEI_CLIENTS_MAX) {
+               dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
+               return -ENOENT;
+       }
+
+       dev->open_handle_count++;
+
+       cl->host_client_id = id;
+       list_add_tail(&cl->link, &dev->file_list);
+
+       set_bit(id, dev->host_clients_map);
+
+       cl->state = MEI_FILE_INITIALIZING;
+
+       dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
+       return 0;
+}
+
+/**
+ * mei_cl_unlink - remove me_cl from the list
+ *
+ * @dev: the device structure
+ */
+int mei_cl_unlink(struct mei_cl *cl)
+{
+       struct mei_device *dev;
+       struct mei_cl *pos, *next;
+
+       /* don't shout on error exit path */
+       if (!cl)
+               return 0;
+
+       /* wd and amthif might not be initialized */
+       if (!cl->dev)
+               return 0;
+
+       dev = cl->dev;
+
+       list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+               if (cl->host_client_id == pos->host_client_id) {
+                       dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
+                               pos->host_client_id, pos->me_client_id);
+                       list_del_init(&pos->link);
+                       break;
+               }
+       }
+       return 0;
+}
+
+
+void mei_host_client_init(struct work_struct *work)
+{
+       struct mei_device *dev = container_of(work,
+                                             struct mei_device, init_work);
+       struct mei_client_properties *client_props;
+       int i;
+
+       mutex_lock(&dev->device_lock);
+
+       bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+       dev->open_handle_count = 0;
+
+       /*
+        * Reserving the first three client IDs
+        * 0: Reserved for MEI Bus Message communications
+        * 1: Reserved for Watchdog
+        * 2: Reserved for AMTHI
+        */
+       bitmap_set(dev->host_clients_map, 0, 3);
+
+       for (i = 0; i < dev->me_clients_num; i++) {
+               client_props = &dev->me_clients[i].props;
+
+               if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
+                       mei_amthif_host_init(dev);
+               else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
+                       mei_wd_host_init(dev);
+       }
+
+       dev->dev_state = MEI_DEV_ENABLED;
+
+       mutex_unlock(&dev->device_lock);
+}
+
+
+/**
+ * mei_cl_disconnect - disconnect host clinet form the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+       struct mei_device *dev;
+       struct mei_cl_cb *cb;
+       int rets, err;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return -ENODEV;
+
+       dev = cl->dev;
+
+       if (cl->state != MEI_FILE_DISCONNECTING)
+               return 0;
+
+       cb = mei_io_cb_init(cl, NULL);
+       if (!cb)
+               return -ENOMEM;
+
+       cb->fop_type = MEI_FOP_CLOSE;
+       if (dev->hbuf_is_ready) {
+               dev->hbuf_is_ready = false;
+               if (mei_hbm_cl_disconnect_req(dev, cl)) {
+                       rets = -ENODEV;
+                       dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+                       goto free;
+               }
+               mdelay(10); /* Wait for hardware disconnection ready */
+               list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+       } else {
+               dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
+               list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
+       }
+       mutex_unlock(&dev->device_lock);
+
+       err = wait_event_timeout(dev->wait_recvd_msg,
+                       MEI_FILE_DISCONNECTED == cl->state,
+                       mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+
+       mutex_lock(&dev->device_lock);
+       if (MEI_FILE_DISCONNECTED == cl->state) {
+               rets = 0;
+               dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
+       } else {
+               rets = -ENODEV;
+               if (MEI_FILE_DISCONNECTED != cl->state)
+                       dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
+
+               if (err)
+                       dev_dbg(&dev->pdev->dev,
+                                       "wait failed disconnect err=%08x\n",
+                                       err);
+
+               dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
+       }
+
+       mei_io_list_flush(&dev->ctrl_rd_list, cl);
+       mei_io_list_flush(&dev->ctrl_wr_list, cl);
+free:
+       mei_io_cb_free(cb);
+       return rets;
+}
+
+
+/**
+ * mei_cl_is_other_connecting - checks if other
+ *    client with the same me client id is connecting
+ *
+ * @cl: private data of the file object
+ *
+ * returns ture if other client is connected, 0 - otherwise.
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl)
+{
+       struct mei_device *dev;
+       struct mei_cl *pos;
+       struct mei_cl *next;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return false;
+
+       dev = cl->dev;
+
+       list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+               if ((pos->state == MEI_FILE_CONNECTING) &&
+                   (pos != cl) && cl->me_client_id == pos->me_client_id)
+                       return true;
+
+       }
+
+       return false;
+}
+
+/**
+ * mei_cl_connect - connect host clinet to the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_connect(struct mei_cl *cl, struct file *file)
+{
+       struct mei_device *dev;
+       struct mei_cl_cb *cb;
+       long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+       int rets;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return -ENODEV;
+
+       dev = cl->dev;
+
+       cb = mei_io_cb_init(cl, file);
+       if (!cb) {
+               rets = -ENOMEM;
+               goto out;
+       }
+
+       cb->fop_type = MEI_FOP_IOCTL;
+
+       if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
+               dev->hbuf_is_ready = false;
+
+               if (mei_hbm_cl_connect_req(dev, cl)) {
+                       rets = -ENODEV;
+                       goto out;
+               }
+               cl->timer_count = MEI_CONNECT_TIMEOUT;
+               list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+       } else {
+               list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+       }
+
+       mutex_unlock(&dev->device_lock);
+       rets = wait_event_timeout(dev->wait_recvd_msg,
+                                (cl->state == MEI_FILE_CONNECTED ||
+                                 cl->state == MEI_FILE_DISCONNECTED),
+                                timeout * HZ);
+       mutex_lock(&dev->device_lock);
+
+       if (cl->state != MEI_FILE_CONNECTED) {
+               rets = -EFAULT;
+
+               mei_io_list_flush(&dev->ctrl_rd_list, cl);
+               mei_io_list_flush(&dev->ctrl_wr_list, cl);
+               goto out;
+       }
+
+       rets = cl->status;
+
+out:
+       mei_io_cb_free(cb);
+       return rets;
+}
+
+/**
+ * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
+ *
+ * @dev: the device structure
+ * @cl: private data of the file object
+ *
+ * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
+ *     -ENOENT if mei_cl is not present
+ *     -EINVAL if single_recv_buf == 0
+ */
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+{
+       struct mei_device *dev;
+       int i;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return -EINVAL;
+
+       dev = cl->dev;
+
+       if (!dev->me_clients_num)
+               return 0;
+
+       if (cl->mei_flow_ctrl_creds > 0)
+               return 1;
+
+       for (i = 0; i < dev->me_clients_num; i++) {
+               struct mei_me_client  *me_cl = &dev->me_clients[i];
+               if (me_cl->client_id == cl->me_client_id) {
+                       if (me_cl->mei_flow_ctrl_creds) {
+                               if (WARN_ON(me_cl->props.single_recv_buf == 0))
+                                       return -EINVAL;
+                               return 1;
+                       } else {
+                               return 0;
+                       }
+               }
+       }
+       return -ENOENT;
+}
+
+/**
+ * mei_cl_flow_ctrl_reduce - reduces flow_control.
+ *
+ * @dev: the device structure
+ * @cl: private data of the file object
+ * @returns
+ *     0 on success
+ *     -ENOENT when me client is not found
+ *     -EINVAL when ctrl credits are <= 0
+ */
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+{
+       struct mei_device *dev;
+       int i;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return -EINVAL;
+
+       dev = cl->dev;
+
+       if (!dev->me_clients_num)
+               return -ENOENT;
+
+       for (i = 0; i < dev->me_clients_num; i++) {
+               struct mei_me_client  *me_cl = &dev->me_clients[i];
+               if (me_cl->client_id == cl->me_client_id) {
+                       if (me_cl->props.single_recv_buf != 0) {
+                               if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+                                       return -EINVAL;
+                               dev->me_clients[i].mei_flow_ctrl_creds--;
+                       } else {
+                               if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+                                       return -EINVAL;
+                               cl->mei_flow_ctrl_creds--;
+                       }
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+/**
+ * mei_cl_start_read - the start read client message function.
+ *
+ * @cl: host client
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_read_start(struct mei_cl *cl)
+{
+       struct mei_device *dev;
+       struct mei_cl_cb *cb;
+       int rets;
+       int i;
+
+       if (WARN_ON(!cl || !cl->dev))
+               return -ENODEV;
+
+       dev = cl->dev;
+
+       if (cl->state != MEI_FILE_CONNECTED)
+               return -ENODEV;
+
+       if (dev->dev_state != MEI_DEV_ENABLED)
+               return -ENODEV;
+
+       if (cl->read_cb) {
+               dev_dbg(&dev->pdev->dev, "read is pending.\n");
+               return -EBUSY;
+       }
+       i = mei_me_cl_by_id(dev, cl->me_client_id);
+       if (i < 0) {
+               dev_err(&dev->pdev->dev, "no such me client %d\n",
+                       cl->me_client_id);
+               return  -ENODEV;
+       }
+
+       cb = mei_io_cb_init(cl, NULL);
+       if (!cb)
+               return -ENOMEM;
+
+       rets = mei_io_cb_alloc_resp_buf(cb,
+                       dev->me_clients[i].props.max_msg_length);
+       if (rets)
+               goto err;
+
+       cb->fop_type = MEI_FOP_READ;
+       cl->read_cb = cb;
+       if (dev->hbuf_is_ready) {
+               dev->hbuf_is_ready = false;
+               if (mei_hbm_cl_flow_control_req(dev, cl)) {
+                       rets = -ENODEV;
+                       goto err;
+               }
+               list_add_tail(&cb->list, &dev->read_list.list);
+       } else {
+               list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+       }
+       return rets;
+err:
+       mei_io_cb_free(cb);
+       return rets;
+}
+
+/**
+ * mei_cl_all_disconnect - disconnect forcefully all connected clients
+ *
+ * @dev - mei device
+ */
+
+void mei_cl_all_disconnect(struct mei_device *dev)
+{
+       struct mei_cl *cl, *next;
+
+       list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+               cl->state = MEI_FILE_DISCONNECTED;
+               cl->mei_flow_ctrl_creds = 0;
+               cl->read_cb = NULL;
+               cl->timer_count = 0;
+       }
+}
+
+
+/**
+ * mei_cl_all_read_wakeup  - wake up all readings so they can be interrupted
+ *
+ * @dev  - mei device
+ */
+void mei_cl_all_read_wakeup(struct mei_device *dev)
+{
+       struct mei_cl *cl, *next;
+       list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+               if (waitqueue_active(&cl->rx_wait)) {
+                       dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+                       wake_up_interruptible(&cl->rx_wait);
+               }
+       }
+}
+
+/**
+ * mei_cl_all_write_clear - clear all pending writes
+
+ * @dev - mei device
+ */
+void mei_cl_all_write_clear(struct mei_device *dev)
+{
+       struct mei_cl_cb *cb, *next;
+
+       list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+               list_del(&cb->list);
+               mei_io_cb_free(cb);
+       }
+}
+
+
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
new file mode 100644 (file)
index 0000000..214b239
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_CLIENT_H_
+#define _MEI_CLIENT_H_
+
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/poll.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
+
+/*
+ * MEI IO Functions
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+void mei_io_cb_free(struct mei_cl_cb *priv_cb);
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+
+
+/**
+ * mei_io_list_init - Sets up a queue list.
+ *
+ * @list: An instance cl callback structure
+ */
+static inline void mei_io_list_init(struct mei_cl_cb *list)
+{
+       INIT_LIST_HEAD(&list->list);
+}
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+
+/*
+ * MEI Host Client Functions
+ */
+
+struct mei_cl *mei_cl_allocate(struct mei_device *dev);
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
+
+
+int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_unlink(struct mei_cl *cl);
+
+int mei_cl_flush_queues(struct mei_cl *cl);
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+
+/**
+ * mei_cl_cmp_id - tells if file private data have same id
+ *
+ * @fe1: private data of 1. file object
+ * @fe2: private data of 2. file object
+ *
+ * returns true  - if ids are the same and not NULL
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+                               const struct mei_cl *cl2)
+{
+       return cl1 && cl2 &&
+               (cl1->host_client_id == cl2->host_client_id) &&
+               (cl1->me_client_id == cl2->me_client_id);
+}
+
+
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
+
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
+/*
+ *  MEI input output function prototype
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl);
+int mei_cl_disconnect(struct mei_cl *cl);
+
+int mei_cl_read_start(struct mei_cl *cl);
+
+int mei_cl_connect(struct mei_cl *cl, struct file *file);
+
+void mei_host_client_init(struct work_struct *work);
+
+
+void mei_cl_all_disconnect(struct mei_device *dev);
+void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_write_clear(struct mei_device *dev);
+
+
+#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
new file mode 100644 (file)
index 0000000..fb9e63b
--- /dev/null
@@ -0,0 +1,669 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+
+/**
+ * mei_hbm_me_cl_allocate - allocates storage for me clients
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+{
+       struct mei_me_client *clients;
+       int b;
+
+       /* count how many ME clients we have */
+       for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
+               dev->me_clients_num++;
+
+       if (dev->me_clients_num <= 0)
+               return;
+
+       kfree(dev->me_clients);
+       dev->me_clients = NULL;
+
+       dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
+               dev->me_clients_num * sizeof(struct mei_me_client));
+       /* allocate storage for ME clients representation */
+       clients = kcalloc(dev->me_clients_num,
+                       sizeof(struct mei_me_client), GFP_KERNEL);
+       if (!clients) {
+               dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
+               dev->dev_state = MEI_DEV_RESETING;
+               mei_reset(dev, 1);
+               return;
+       }
+       dev->me_clients = clients;
+       return;
+}
+
+/**
+ * mei_hbm_cl_hdr - construct client hbm header
+ * @cl: - client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ */
+static inline
+void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
+{
+       struct mei_hbm_cl_cmd *cmd = buf;
+
+       memset(cmd, 0, len);
+
+       cmd->hbm_cmd = hbm_cmd;
+       cmd->host_addr = cl->host_client_id;
+       cmd->me_addr = cl->me_client_id;
+}
+
+/**
+ * same_disconn_addr - tells if they have the same address
+ *
+ * @file: private data of the file object.
+ * @disconn: disconnection request.
+ *
+ * returns true if addres are same
+ */
+static inline
+bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
+{
+       struct mei_hbm_cl_cmd *cmd = buf;
+       return cl->host_client_id == cmd->host_addr &&
+               cl->me_client_id == cmd->me_addr;
+}
+
+
+/**
+ * is_treat_specially_client - checks if the message belongs
+ * to the file private data.
+ *
+ * @cl: private data of the file object
+ * @rs: connect response bus message
+ *
+ */
+static bool is_treat_specially_client(struct mei_cl *cl,
+               struct hbm_client_connect_response *rs)
+{
+       if (mei_hbm_cl_addr_equal(cl, rs)) {
+               if (!rs->status) {
+                       cl->state = MEI_FILE_CONNECTED;
+                       cl->status = 0;
+
+               } else {
+                       cl->state = MEI_FILE_DISCONNECTED;
+                       cl->status = -ENODEV;
+               }
+               cl->timer_count = 0;
+
+               return true;
+       }
+       return false;
+}
+
+/**
+ * mei_hbm_start_req - sends start request message.
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_start_req(struct mei_device *dev)
+{
+       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       struct hbm_host_version_request *start_req;
+       const size_t len = sizeof(struct hbm_host_version_request);
+
+       mei_hbm_hdr(mei_hdr, len);
+
+       /* host start message */
+       start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
+       memset(start_req, 0, len);
+       start_req->hbm_cmd = HOST_START_REQ_CMD;
+       start_req->host_version.major_version = HBM_MAJOR_VERSION;
+       start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+       dev->recvd_msg = false;
+       if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+               dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
+               dev->dev_state = MEI_DEV_RESETING;
+               mei_reset(dev, 1);
+       }
+       dev->init_clients_state = MEI_START_MESSAGE;
+       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       return ;
+}
+
+/**
+ * mei_hbm_enum_clients_req - sends enumeration client request message.
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_enum_clients_req(struct mei_device *dev)
+{
+       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       struct hbm_host_enum_request *enum_req;
+       const size_t len = sizeof(struct hbm_host_enum_request);
+       /* enumerate clients */
+       mei_hbm_hdr(mei_hdr, len);
+
+       enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
+       memset(enum_req, 0, len);
+       enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+       if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+               dev->dev_state = MEI_DEV_RESETING;
+               dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
+               mei_reset(dev, 1);
+       }
+       dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
+       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       return;
+}
+
+/**
+ * mei_hbm_prop_requsest - request property for a single client
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+
+static int mei_hbm_prop_req(struct mei_device *dev)
+{
+
+       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       struct hbm_props_request *prop_req;
+       const size_t len = sizeof(struct hbm_props_request);
+       unsigned long next_client_index;
+       u8 client_num;
+
+
+       client_num = dev->me_client_presentation_num;
+
+       next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
+                                         dev->me_client_index);
+
+       /* We got all client properties */
+       if (next_client_index == MEI_CLIENTS_MAX) {
+               schedule_work(&dev->init_work);
+
+               return 0;
+       }
+
+       dev->me_clients[client_num].client_id = next_client_index;
+       dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
+
+       mei_hbm_hdr(mei_hdr, len);
+       prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+
+       memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+
+       prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+       prop_req->address = next_client_index;
+
+       if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+               dev->dev_state = MEI_DEV_RESETING;
+               dev_err(&dev->pdev->dev, "Properties request command failed\n");
+               mei_reset(dev, 1);
+
+               return -EIO;
+       }
+
+       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       dev->me_client_index = next_client_index;
+
+       return 0;
+}
+
+/**
+ * mei_hbm_stop_req_prepare - perpare stop request message
+ *
+ * @dev - mei device
+ * @mei_hdr - mei message header
+ * @data - hbm message body buffer
+ */
+static void mei_hbm_stop_req_prepare(struct mei_device *dev,
+               struct mei_msg_hdr *mei_hdr, unsigned char *data)
+{
+       struct hbm_host_stop_request *req =
+                       (struct hbm_host_stop_request *)data;
+       const size_t len = sizeof(struct hbm_host_stop_request);
+
+       mei_hbm_hdr(mei_hdr, len);
+
+       memset(req, 0, len);
+       req->hbm_cmd = HOST_STOP_REQ_CMD;
+       req->reason = DRIVER_STOP_REQUEST;
+}
+
+/**
+ * mei_hbm_cl_flow_control_req - sends flow control requst.
+ *
+ * @dev: the device structure
+ * @cl: client info
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
+{
+       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       const size_t len = sizeof(struct hbm_flow_control);
+
+       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
+
+       dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
+               cl->host_client_id, cl->me_client_id);
+
+       return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * add_single_flow_creds - adds single buffer credentials.
+ *
+ * @file: private data ot the file object.
+ * @flow: flow control.
+ */
+static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+                                 struct hbm_flow_control *flow)
+{
+       struct mei_me_client *client;
+       int i;
+
+       for (i = 0; i < dev->me_clients_num; i++) {
+               client = &dev->me_clients[i];
+               if (client && flow->me_addr == client->client_id) {
+                       if (client->props.single_recv_buf) {
+                               client->mei_flow_ctrl_creds++;
+                               dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+                                   flow->me_addr);
+                               dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+                                   client->mei_flow_ctrl_creds);
+                       } else {
+                               BUG();  /* error in flow control */
+                       }
+               }
+       }
+}
+
+/**
+ * mei_hbm_cl_flow_control_res - flow control response from me
+ *
+ * @dev: the device structure
+ * @flow_control: flow control response bus message
+ */
+static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
+               struct hbm_flow_control *flow_control)
+{
+       struct mei_cl *cl = NULL;
+       struct mei_cl *next = NULL;
+
+       if (!flow_control->host_addr) {
+               /* single receive buffer */
+               mei_hbm_add_single_flow_creds(dev, flow_control);
+               return;
+       }
+
+       /* normal connection */
+       list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+               if (mei_hbm_cl_addr_equal(cl, flow_control)) {
+                       cl->mei_flow_ctrl_creds++;
+                       dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
+                               flow_control->host_addr, flow_control->me_addr);
+                       dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
+                                   cl->mei_flow_ctrl_creds);
+                               break;
+               }
+       }
+}
+
+
+/**
+ * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       const size_t len = sizeof(struct hbm_client_connect_request);
+
+       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+       return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_disconnect_res - disconnect response from ME
+ *
+ * @dev: the device structure
+ * @rs: disconnect response bus message
+ */
+static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
+               struct hbm_client_connect_response *rs)
+{
+       struct mei_cl *cl;
+       struct mei_cl_cb *pos = NULL, *next = NULL;
+
+       dev_dbg(&dev->pdev->dev,
+                       "disconnect_response:\n"
+                       "ME Client = %d\n"
+                       "Host Client = %d\n"
+                       "Status = %d\n",
+                       rs->me_addr,
+                       rs->host_addr,
+                       rs->status);
+
+       list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+               cl = pos->cl;
+
+               if (!cl) {
+                       list_del(&pos->list);
+                       return;
+               }
+
+               dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+               if (mei_hbm_cl_addr_equal(cl, rs)) {
+                       list_del(&pos->list);
+                       if (!rs->status)
+                               cl->state = MEI_FILE_DISCONNECTED;
+
+                       cl->status = 0;
+                       cl->timer_count = 0;
+                       break;
+               }
+       }
+}
+
+/**
+ * mei_hbm_cl_connect_req - send connection request to specific me client
+ *
+ * @dev: the device structure
+ * @cl: a client to connect to
+ *
+ * returns -EIO on write failure
+ */
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       const size_t len = sizeof(struct hbm_client_connect_request);
+
+       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+       return mei_write_message(dev, mei_hdr,  dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_connect_res - connect resposne from the ME
+ *
+ * @dev: the device structure
+ * @rs: connect response bus message
+ */
+static void mei_hbm_cl_connect_res(struct mei_device *dev,
+               struct hbm_client_connect_response *rs)
+{
+
+       struct mei_cl *cl;
+       struct mei_cl_cb *pos = NULL, *next = NULL;
+
+       dev_dbg(&dev->pdev->dev,
+                       "connect_response:\n"
+                       "ME Client = %d\n"
+                       "Host Client = %d\n"
+                       "Status = %d\n",
+                       rs->me_addr,
+                       rs->host_addr,
+                       rs->status);
+
+       /* if WD or iamthif client treat specially */
+
+       if (is_treat_specially_client(&dev->wd_cl, rs)) {
+               dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+               mei_watchdog_register(dev);
+
+               return;
+       }
+
+       if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
+               dev->iamthif_state = MEI_IAMTHIF_IDLE;
+               return;
+       }
+       list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+
+               cl = pos->cl;
+               if (!cl) {
+                       list_del(&pos->list);
+                       return;
+               }
+               if (pos->fop_type == MEI_FOP_IOCTL) {
+                       if (is_treat_specially_client(cl, rs)) {
+                               list_del(&pos->list);
+                               cl->status = 0;
+                               cl->timer_count = 0;
+                               break;
+                       }
+               }
+       }
+}
+
+
+/**
+ * mei_client_disconnect_request - disconnect request initiated by me
+ *  host sends disoconnect response
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the me
+ */
+static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+               struct hbm_client_connect_request *disconnect_req)
+{
+       struct mei_cl *cl, *next;
+       const size_t len = sizeof(struct hbm_client_connect_response);
+
+       list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+               if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
+                       dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
+                                       disconnect_req->host_addr,
+                                       disconnect_req->me_addr);
+                       cl->state = MEI_FILE_DISCONNECTED;
+                       cl->timer_count = 0;
+                       if (cl == &dev->wd_cl)
+                               dev->wd_pending = false;
+                       else if (cl == &dev->iamthif_cl)
+                               dev->iamthif_timer = 0;
+
+                       /* prepare disconnect response */
+                       mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
+                       mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
+                                        dev->wr_ext_msg.data, len);
+                       break;
+               }
+       }
+}
+
+
+/**
+ * mei_hbm_dispatch - bottom half read routine after ISR to
+ * handle the read bus message cmd processing.
+ *
+ * @dev: the device structure
+ * @mei_hdr: header of bus message
+ */
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+{
+       struct mei_bus_message *mei_msg;
+       struct mei_me_client *me_client;
+       struct hbm_host_version_response *version_res;
+       struct hbm_client_connect_response *connect_res;
+       struct hbm_client_connect_response *disconnect_res;
+       struct hbm_client_connect_request *disconnect_req;
+       struct hbm_flow_control *flow_control;
+       struct hbm_props_response *props_res;
+       struct hbm_host_enum_response *enum_res;
+
+       /* read the message to our buffer */
+       BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
+       mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+       mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+
+       switch (mei_msg->hbm_cmd) {
+       case HOST_START_RES_CMD:
+               version_res = (struct hbm_host_version_response *)mei_msg;
+               if (!version_res->host_version_supported) {
+                       dev->version = version_res->me_max_version;
+                       dev_dbg(&dev->pdev->dev, "version mismatch.\n");
+
+                       mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
+                                               dev->wr_msg.data);
+                       mei_write_message(dev, &dev->wr_msg.hdr,
+                                       dev->wr_msg.data);
+                       return;
+               }
+
+               dev->version.major_version = HBM_MAJOR_VERSION;
+               dev->version.minor_version = HBM_MINOR_VERSION;
+               if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+                   dev->init_clients_state == MEI_START_MESSAGE) {
+                       dev->init_clients_timer = 0;
+                       mei_hbm_enum_clients_req(dev);
+               } else {
+                       dev->recvd_msg = false;
+                       dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
+                       mei_reset(dev, 1);
+                       return;
+               }
+
+               dev->recvd_msg = true;
+               dev_dbg(&dev->pdev->dev, "host start response message received.\n");
+               break;
+
+       case CLIENT_CONNECT_RES_CMD:
+               connect_res = (struct hbm_client_connect_response *) mei_msg;
+               mei_hbm_cl_connect_res(dev, connect_res);
+               dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
+               wake_up(&dev->wait_recvd_msg);
+               break;
+
+       case CLIENT_DISCONNECT_RES_CMD:
+               disconnect_res = (struct hbm_client_connect_response *) mei_msg;
+               mei_hbm_cl_disconnect_res(dev, disconnect_res);
+               dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
+               wake_up(&dev->wait_recvd_msg);
+               break;
+
+       case MEI_FLOW_CONTROL_CMD:
+               flow_control = (struct hbm_flow_control *) mei_msg;
+               mei_hbm_cl_flow_control_res(dev, flow_control);
+               dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
+               break;
+
+       case HOST_CLIENT_PROPERTIES_RES_CMD:
+               props_res = (struct hbm_props_response *)mei_msg;
+               me_client = &dev->me_clients[dev->me_client_presentation_num];
+
+               if (props_res->status || !dev->me_clients) {
+                       dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
+                       mei_reset(dev, 1);
+                       return;
+               }
+
+               if (me_client->client_id != props_res->address) {
+                       dev_err(&dev->pdev->dev,
+                               "Host client properties reply mismatch\n");
+                       mei_reset(dev, 1);
+
+                       return;
+               }
+
+               if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+                   dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
+                       dev_err(&dev->pdev->dev,
+                               "Unexpected client properties reply\n");
+                       mei_reset(dev, 1);
+
+                       return;
+               }
+
+               me_client->props = props_res->client_properties;
+               dev->me_client_index++;
+               dev->me_client_presentation_num++;
+
+               /* request property for the next client */
+               mei_hbm_prop_req(dev);
+
+               break;
+
+       case HOST_ENUM_RES_CMD:
+               enum_res = (struct hbm_host_enum_response *) mei_msg;
+               memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
+               if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+                   dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
+                               dev->init_clients_timer = 0;
+                               dev->me_client_presentation_num = 0;
+                               dev->me_client_index = 0;
+                               mei_hbm_me_cl_allocate(dev);
+                               dev->init_clients_state =
+                                       MEI_CLIENT_PROPERTIES_MESSAGE;
+
+                               /* first property reqeust */
+                               mei_hbm_prop_req(dev);
+               } else {
+                       dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
+                       mei_reset(dev, 1);
+                       return;
+               }
+               break;
+
+       case HOST_STOP_RES_CMD:
+               dev->dev_state = MEI_DEV_DISABLED;
+               dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
+               mei_reset(dev, 1);
+               break;
+
+       case CLIENT_DISCONNECT_REQ_CMD:
+               /* search for client */
+               disconnect_req = (struct hbm_client_connect_request *)mei_msg;
+               mei_hbm_fw_disconnect_req(dev, disconnect_req);
+               break;
+
+       case ME_STOP_REQ_CMD:
+
+               mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
+                                       dev->wr_ext_msg.data);
+               break;
+       default:
+               BUG();
+               break;
+
+       }
+}
+
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
new file mode 100644 (file)
index 0000000..b552afb
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HBM_H_
+#define _MEI_HBM_H_
+
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+
+static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+{
+       hdr->host_addr = 0;
+       hdr->me_addr = 0;
+       hdr->length = length;
+       hdr->msg_complete = 1;
+       hdr->reserved = 0;
+}
+
+void mei_hbm_start_req(struct mei_device *dev);
+
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
+
+
+#endif /* _MEI_HBM_H_ */
+
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
new file mode 100644 (file)
index 0000000..6a203b6
--- /dev/null
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *     Intel Corporation.
+ *     linux-mei@linux.intel.com
+ *     http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_MEI_REGS_H_
+#define _MEI_HW_MEI_REGS_H_
+
+/*
+ * MEI device IDs
+ */
+#define MEI_DEV_ID_82946GZ    0x2974  /* 82946GZ/GL */
+#define MEI_DEV_ID_82G35      0x2984  /* 82G35 Express */
+#define MEI_DEV_ID_82Q965     0x2994  /* 82Q963/Q965 */
+#define MEI_DEV_ID_82G965     0x29A4  /* 82P965/G965 */
+
+#define MEI_DEV_ID_82GM965    0x2A04  /* Mobile PM965/GM965 */
+#define MEI_DEV_ID_82GME965   0x2A14  /* Mobile GME965/GLE960 */
+
+#define MEI_DEV_ID_ICH9_82Q35 0x29B4  /* 82Q35 Express */
+#define MEI_DEV_ID_ICH9_82G33 0x29C4  /* 82G33/G31/P35/P31 Express */
+#define MEI_DEV_ID_ICH9_82Q33 0x29D4  /* 82Q33 Express */
+#define MEI_DEV_ID_ICH9_82X38 0x29E4  /* 82X38/X48 Express */
+#define MEI_DEV_ID_ICH9_3200  0x29F4  /* 3200/3210 Server */
+
+#define MEI_DEV_ID_ICH9_6     0x28B4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_7     0x28C4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_8     0x28D4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_9     0x28E4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_10    0x28F4  /* Bearlake */
+
+#define MEI_DEV_ID_ICH9M_1    0x2A44  /* Cantiga */
+#define MEI_DEV_ID_ICH9M_2    0x2A54  /* Cantiga */
+#define MEI_DEV_ID_ICH9M_3    0x2A64  /* Cantiga */
+#define MEI_DEV_ID_ICH9M_4    0x2A74  /* Cantiga */
+
+#define MEI_DEV_ID_ICH10_1    0x2E04  /* Eaglelake */
+#define MEI_DEV_ID_ICH10_2    0x2E14  /* Eaglelake */
+#define MEI_DEV_ID_ICH10_3    0x2E24  /* Eaglelake */
+#define MEI_DEV_ID_ICH10_4    0x2E34  /* Eaglelake */
+
+#define MEI_DEV_ID_IBXPK_1    0x3B64  /* Calpella */
+#define MEI_DEV_ID_IBXPK_2    0x3B65  /* Calpella */
+
+#define MEI_DEV_ID_CPT_1      0x1C3A  /* Couger Point */
+#define MEI_DEV_ID_PBG_1      0x1D3A  /* C600/X79 Patsburg */
+
+#define MEI_DEV_ID_PPT_1      0x1E3A  /* Panther Point */
+#define MEI_DEV_ID_PPT_2      0x1CBA  /* Panther Point */
+#define MEI_DEV_ID_PPT_3      0x1DBA  /* Panther Point */
+
+#define MEI_DEV_ID_LPT        0x8C3A  /* Lynx Point */
+#define MEI_DEV_ID_LPT_LP     0x9C3A  /* Lynx Point LP */
+/*
+ * MEI HW Section
+ */
+
+/* MEI registers */
+/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
+#define H_CB_WW    0
+/* H_CSR - Host Control Status register */
+#define H_CSR      4
+/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
+#define ME_CB_RW   8
+/* ME_CSR_HA - ME Control Status Host Access register (read only) */
+#define ME_CSR_HA  0xC
+
+
+/* register bits of H_CSR (Host Control Status register) */
+/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
+#define H_CBD             0xFF000000
+/* Host Circular Buffer Write Pointer */
+#define H_CBWP            0x00FF0000
+/* Host Circular Buffer Read Pointer */
+#define H_CBRP            0x0000FF00
+/* Host Reset */
+#define H_RST             0x00000010
+/* Host Ready */
+#define H_RDY             0x00000008
+/* Host Interrupt Generate */
+#define H_IG              0x00000004
+/* Host Interrupt Status */
+#define H_IS              0x00000002
+/* Host Interrupt Enable */
+#define H_IE              0x00000001
+
+
+/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
+/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
+access to ME_CBD */
+#define ME_CBD_HRA        0xFF000000
+/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
+#define ME_CBWP_HRA       0x00FF0000
+/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
+#define ME_CBRP_HRA       0x0000FF00
+/* ME Reset HRA - host read only access to ME_RST */
+#define ME_RST_HRA        0x00000010
+/* ME Ready HRA - host read only access to ME_RDY */
+#define ME_RDY_HRA        0x00000008
+/* ME Interrupt Generate HRA - host read only access to ME_IG */
+#define ME_IG_HRA         0x00000004
+/* ME Interrupt Status HRA - host read only access to ME_IS */
+#define ME_IS_HRA         0x00000002
+/* ME Interrupt Enable HRA - host read only access to ME_IE */
+#define ME_IE_HRA         0x00000001
+
+#endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
new file mode 100644 (file)
index 0000000..45ea718
--- /dev/null
@@ -0,0 +1,576 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+
+#include "hbm.h"
+
+
+/**
+ * mei_reg_read - Reads 32bit data from the mei device
+ *
+ * @dev: the device structure
+ * @offset: offset from which to read the data
+ *
+ * returns register value (u32)
+ */
+static inline u32 mei_reg_read(const struct mei_me_hw *hw,
+                              unsigned long offset)
+{
+       return ioread32(hw->mem_addr + offset);
+}
+
+
+/**
+ * mei_reg_write - Writes 32bit data to the mei device
+ *
+ * @dev: the device structure
+ * @offset: offset from which to write the data
+ * @value: register value to write (u32)
+ */
+static inline void mei_reg_write(const struct mei_me_hw *hw,
+                                unsigned long offset, u32 value)
+{
+       iowrite32(value, hw->mem_addr + offset);
+}
+
+/**
+ * mei_mecbrw_read - Reads 32bit data from ME circular buffer
+ *  read window register
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CB_RW register value (u32)
+ */
+static u32 mei_me_mecbrw_read(const struct mei_device *dev)
+{
+       return mei_reg_read(to_me_hw(dev), ME_CB_RW);
+}
+/**
+ * mei_mecsr_read - Reads 32bit data from the ME CSR
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CSR_HA register value (u32)
+ */
+static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
+{
+       return mei_reg_read(hw, ME_CSR_HA);
+}
+
+/**
+ * mei_hcsr_read - Reads 32bit data from the host CSR
+ *
+ * @dev: the device structure
+ *
+ * returns H_CSR register value (u32)
+ */
+static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
+{
+       return mei_reg_read(hw, H_CSR);
+}
+
+/**
+ * mei_hcsr_set - writes H_CSR register to the mei device,
+ * and ignores the H_IS bit for it is write-one-to-zero.
+ *
+ * @dev: the device structure
+ */
+static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
+{
+       hcsr &= ~H_IS;
+       mei_reg_write(hw, H_CSR, hcsr);
+}
+
+
+/**
+ * me_hw_config - configure hw dependent settings
+ *
+ * @dev: mei device
+ */
+static void mei_me_hw_config(struct mei_device *dev)
+{
+       u32 hcsr = mei_hcsr_read(to_me_hw(dev));
+       /* Doesn't change in runtime */
+       dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+}
+/**
+ * mei_clear_interrupts - clear and stop interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_clear(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 hcsr = mei_hcsr_read(hw);
+       if ((hcsr & H_IS) == H_IS)
+               mei_reg_write(hw, H_CSR, hcsr);
+}
+/**
+ * mei_me_intr_enable - enables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_enable(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 hcsr = mei_hcsr_read(hw);
+       hcsr |= H_IE;
+       mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_disable_interrupts - disables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_disable(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 hcsr = mei_hcsr_read(hw);
+       hcsr  &= ~H_IE;
+       mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @interrupts_enabled: if interrupt should be enabled after reset.
+ */
+static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 hcsr = mei_hcsr_read(hw);
+
+       dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
+
+       hcsr |= (H_RST | H_IG);
+
+       if (intr_enable)
+               hcsr |= H_IE;
+       else
+               hcsr &= ~H_IE;
+
+       mei_hcsr_set(hw, hcsr);
+
+       hcsr = mei_hcsr_read(hw) | H_IG;
+       hcsr &= ~H_RST;
+
+       mei_hcsr_set(hw, hcsr);
+
+       hcsr = mei_hcsr_read(hw);
+
+       dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+}
+
+/**
+ * mei_me_host_set_ready - enable device
+ *
+ * @dev - mei device
+ * returns bool
+ */
+
+static void mei_me_host_set_ready(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       hw->host_hw_state |= H_IE | H_IG | H_RDY;
+       mei_hcsr_set(hw, hw->host_hw_state);
+}
+/**
+ * mei_me_host_is_ready - check whether the host has turned ready
+ *
+ * @dev - mei device
+ * returns bool
+ */
+static bool mei_me_host_is_ready(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       hw->host_hw_state = mei_hcsr_read(hw);
+       return (hw->host_hw_state & H_RDY) == H_RDY;
+}
+
+/**
+ * mei_me_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * @dev - mei device
+ * returns bool
+ */
+static bool mei_me_hw_is_ready(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       hw->me_hw_state = mei_mecsr_read(hw);
+       return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
+}
+
+/**
+ * mei_hbuf_filled_slots - gets number of device filled buffer slots
+ *
+ * @dev: the device structure
+ *
+ * returns number of filled slots
+ */
+static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       char read_ptr, write_ptr;
+
+       hw->host_hw_state = mei_hcsr_read(hw);
+
+       read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
+       write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
+
+       return (unsigned char) (write_ptr - read_ptr);
+}
+
+/**
+ * mei_hbuf_is_empty - checks if host buffer is empty.
+ *
+ * @dev: the device structure
+ *
+ * returns true if empty, false - otherwise.
+ */
+static bool mei_me_hbuf_is_empty(struct mei_device *dev)
+{
+       return mei_hbuf_filled_slots(dev) == 0;
+}
+
+/**
+ * mei_me_hbuf_empty_slots - counts write empty slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
+ */
+static int mei_me_hbuf_empty_slots(struct mei_device *dev)
+{
+       unsigned char filled_slots, empty_slots;
+
+       filled_slots = mei_hbuf_filled_slots(dev);
+       empty_slots = dev->hbuf_depth - filled_slots;
+
+       /* check for overflow */
+       if (filled_slots > dev->hbuf_depth)
+               return -EOVERFLOW;
+
+       return empty_slots;
+}
+
+static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+{
+       return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
+}
+
+
+/**
+ * mei_write_message - writes a message to mei device.
+ *
+ * @dev: the device structure
+ * @header: mei HECI header of message
+ * @buf: message payload will be written
+ *
+ * This function returns -EIO if write has failed
+ */
+static int mei_me_write_message(struct mei_device *dev,
+                       struct mei_msg_hdr *header,
+                       unsigned char *buf)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       unsigned long rem, dw_cnt;
+       unsigned long length = header->length;
+       u32 *reg_buf = (u32 *)buf;
+       u32 hcsr;
+       int i;
+       int empty_slots;
+
+       dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+       empty_slots = mei_hbuf_empty_slots(dev);
+       dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
+
+       dw_cnt = mei_data2slots(length);
+       if (empty_slots < 0 || dw_cnt > empty_slots)
+               return -EIO;
+
+       mei_reg_write(hw, H_CB_WW, *((u32 *) header));
+
+       for (i = 0; i < length / 4; i++)
+               mei_reg_write(hw, H_CB_WW, reg_buf[i]);
+
+       rem = length & 0x3;
+       if (rem > 0) {
+               u32 reg = 0;
+               memcpy(&reg, &buf[length - rem], rem);
+               mei_reg_write(hw, H_CB_WW, reg);
+       }
+
+       hcsr = mei_hcsr_read(hw) | H_IG;
+       mei_hcsr_set(hw, hcsr);
+       if (!mei_me_hw_is_ready(dev))
+               return -EIO;
+
+       return 0;
+}
+
+/**
+ * mei_me_count_full_read_slots - counts read full slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
+ */
+static int mei_me_count_full_read_slots(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       char read_ptr, write_ptr;
+       unsigned char buffer_depth, filled_slots;
+
+       hw->me_hw_state = mei_mecsr_read(hw);
+       buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
+       read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
+       write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+       filled_slots = (unsigned char) (write_ptr - read_ptr);
+
+       /* check for overflow */
+       if (filled_slots > buffer_depth)
+               return -EOVERFLOW;
+
+       dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
+       return (int)filled_slots;
+}
+
+/**
+ * mei_me_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buffer: message buffer will be written
+ * @buffer_length: message size will be read
+ */
+static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
+                   unsigned long buffer_length)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 *reg_buf = (u32 *)buffer;
+       u32 hcsr;
+
+       for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+               *reg_buf++ = mei_me_mecbrw_read(dev);
+
+       if (buffer_length > 0) {
+               u32 reg = mei_me_mecbrw_read(dev);
+               memcpy(reg_buf, &reg, buffer_length);
+       }
+
+       hcsr = mei_hcsr_read(hw) | H_IG;
+       mei_hcsr_set(hw, hcsr);
+       return 0;
+}
+
+/**
+ * mei_me_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
+{
+       struct mei_device *dev = (struct mei_device *) dev_id;
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 csr_reg = mei_hcsr_read(hw);
+
+       if ((csr_reg & H_IS) != H_IS)
+               return IRQ_NONE;
+
+       /* clear H_IS bit in H_CSR */
+       mei_reg_write(hw, H_CSR, csr_reg);
+
+       return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+{
+       struct mei_device *dev = (struct mei_device *) dev_id;
+       struct mei_cl_cb complete_list;
+       struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+       struct mei_cl *cl;
+       s32 slots;
+       int rets;
+       bool  bus_message_received;
+
+
+       dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
+       /* initialize our complete list */
+       mutex_lock(&dev->device_lock);
+       mei_io_list_init(&complete_list);
+
+       /* Ack the interrupt here
+        * In case of MSI we don't go through the quick handler */
+       if (pci_dev_msi_enabled(dev->pdev))
+               mei_clear_interrupts(dev);
+
+       /* check if ME wants a reset */
+       if (!mei_hw_is_ready(dev) &&
+           dev->dev_state != MEI_DEV_RESETING &&
+           dev->dev_state != MEI_DEV_INITIALIZING) {
+               dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+               mei_reset(dev, 1);
+               mutex_unlock(&dev->device_lock);
+               return IRQ_HANDLED;
+       }
+
+       /*  check if we need to start the dev */
+       if (!mei_host_is_ready(dev)) {
+               if (mei_hw_is_ready(dev)) {
+                       dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+
+                       mei_host_set_ready(dev);
+
+                       dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+                       /* link is established * start sending messages.  */
+
+                       dev->dev_state = MEI_DEV_INIT_CLIENTS;
+
+                       mei_hbm_start_req(dev);
+                       mutex_unlock(&dev->device_lock);
+                       return IRQ_HANDLED;
+               } else {
+                       dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+                       mutex_unlock(&dev->device_lock);
+                       return IRQ_HANDLED;
+               }
+       }
+       /* check slots available for reading */
+       slots = mei_count_full_read_slots(dev);
+       while (slots > 0) {
+               /* we have urgent data to send so break the read */
+               if (dev->wr_ext_msg.hdr.length)
+                       break;
+               dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
+               dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+               rets = mei_irq_read_handler(dev, &complete_list, &slots);
+               if (rets)
+                       goto end;
+       }
+       rets = mei_irq_write_handler(dev, &complete_list);
+end:
+       dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
+       dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+       bus_message_received = false;
+       if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
+               dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
+               bus_message_received = true;
+       }
+       mutex_unlock(&dev->device_lock);
+       if (bus_message_received) {
+               dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
+               wake_up_interruptible(&dev->wait_recvd_msg);
+               bus_message_received = false;
+       }
+       if (list_empty(&complete_list.list))
+               return IRQ_HANDLED;
+
+
+       list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
+               cl = cb_pos->cl;
+               list_del(&cb_pos->list);
+               if (cl) {
+                       if (cl != &dev->iamthif_cl) {
+                               dev_dbg(&dev->pdev->dev, "completing call back.\n");
+                               mei_irq_complete_handler(cl, cb_pos);
+                               cb_pos = NULL;
+                       } else if (cl == &dev->iamthif_cl) {
+                               mei_amthif_complete(dev, cb_pos);
+                       }
+               }
+       }
+       return IRQ_HANDLED;
+}
+static const struct mei_hw_ops mei_me_hw_ops = {
+
+       .host_set_ready = mei_me_host_set_ready,
+       .host_is_ready = mei_me_host_is_ready,
+
+       .hw_is_ready = mei_me_hw_is_ready,
+       .hw_reset = mei_me_hw_reset,
+       .hw_config  = mei_me_hw_config,
+
+       .intr_clear = mei_me_intr_clear,
+       .intr_enable = mei_me_intr_enable,
+       .intr_disable = mei_me_intr_disable,
+
+       .hbuf_free_slots = mei_me_hbuf_empty_slots,
+       .hbuf_is_ready = mei_me_hbuf_is_empty,
+       .hbuf_max_len = mei_me_hbuf_max_len,
+
+       .write = mei_me_write_message,
+
+       .rdbuf_full_slots = mei_me_count_full_read_slots,
+       .read_hdr = mei_me_mecbrw_read,
+       .read = mei_me_read_slots
+};
+
+/**
+ * init_mei_device - allocates and initializes the mei device structure
+ *
+ * @pdev: The pci device structure
+ *
+ * returns The mei_device_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+{
+       struct mei_device *dev;
+
+       dev = kzalloc(sizeof(struct mei_device) +
+                        sizeof(struct mei_me_hw), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+
+       mei_device_init(dev);
+
+       INIT_LIST_HEAD(&dev->wd_cl.link);
+       INIT_LIST_HEAD(&dev->iamthif_cl.link);
+       mei_io_list_init(&dev->amthif_cmd_list);
+       mei_io_list_init(&dev->amthif_rd_complete_list);
+
+       INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+       INIT_WORK(&dev->init_work, mei_host_client_init);
+
+       dev->ops = &mei_me_hw_ops;
+
+       dev->pdev = pdev;
+       return dev;
+}
+
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644 (file)
index 0000000..8518d3e
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+
+
+#ifndef _MEI_INTERFACE_H_
+#define _MEI_INTERFACE_H_
+
+#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_me_hw {
+       void __iomem *mem_addr;
+       /*
+        * hw states of host and fw(ME)
+        */
+       u32 host_hw_state;
+       u32 me_hw_state;
+};
+
+#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+
+/* get slots (dwords) from a message length + header (bytes) */
+static inline unsigned char mei_data2slots(size_t length)
+{
+       return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+}
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+
+#endif /* _MEI_INTERFACE_H_ */
index be8ca6b..cb2f556 100644 (file)
 #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */
 #define MEI_IAMTHIF_READ_TIMER     10  /* HPS */
 
-/*
- * Internal Clients Number
- */
-#define MEI_WD_HOST_CLIENT_ID          1
-#define MEI_IAMTHIF_HOST_CLIENT_ID     2
-
-/*
- * MEI device IDs
- */
-#define MEI_DEV_ID_82946GZ    0x2974  /* 82946GZ/GL */
-#define MEI_DEV_ID_82G35      0x2984  /* 82G35 Express */
-#define MEI_DEV_ID_82Q965     0x2994  /* 82Q963/Q965 */
-#define MEI_DEV_ID_82G965     0x29A4  /* 82P965/G965 */
-
-#define MEI_DEV_ID_82GM965    0x2A04  /* Mobile PM965/GM965 */
-#define MEI_DEV_ID_82GME965   0x2A14  /* Mobile GME965/GLE960 */
-
-#define MEI_DEV_ID_ICH9_82Q35 0x29B4  /* 82Q35 Express */
-#define MEI_DEV_ID_ICH9_82G33 0x29C4  /* 82G33/G31/P35/P31 Express */
-#define MEI_DEV_ID_ICH9_82Q33 0x29D4  /* 82Q33 Express */
-#define MEI_DEV_ID_ICH9_82X38 0x29E4  /* 82X38/X48 Express */
-#define MEI_DEV_ID_ICH9_3200  0x29F4  /* 3200/3210 Server */
-
-#define MEI_DEV_ID_ICH9_6     0x28B4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_7     0x28C4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_8     0x28D4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_9     0x28E4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_10    0x28F4  /* Bearlake */
-
-#define MEI_DEV_ID_ICH9M_1    0x2A44  /* Cantiga */
-#define MEI_DEV_ID_ICH9M_2    0x2A54  /* Cantiga */
-#define MEI_DEV_ID_ICH9M_3    0x2A64  /* Cantiga */
-#define MEI_DEV_ID_ICH9M_4    0x2A74  /* Cantiga */
-
-#define MEI_DEV_ID_ICH10_1    0x2E04  /* Eaglelake */
-#define MEI_DEV_ID_ICH10_2    0x2E14  /* Eaglelake */
-#define MEI_DEV_ID_ICH10_3    0x2E24  /* Eaglelake */
-#define MEI_DEV_ID_ICH10_4    0x2E34  /* Eaglelake */
-
-#define MEI_DEV_ID_IBXPK_1    0x3B64  /* Calpella */
-#define MEI_DEV_ID_IBXPK_2    0x3B65  /* Calpella */
-
-#define MEI_DEV_ID_CPT_1      0x1C3A  /* Couger Point */
-#define MEI_DEV_ID_PBG_1      0x1D3A  /* C600/X79 Patsburg */
-
-#define MEI_DEV_ID_PPT_1      0x1E3A  /* Panther Point */
-#define MEI_DEV_ID_PPT_2      0x1CBA  /* Panther Point */
-#define MEI_DEV_ID_PPT_3      0x1DBA  /* Panther Point */
-
-#define MEI_DEV_ID_LPT        0x8C3A  /* Lynx Point */
-#define MEI_DEV_ID_LPT_LP     0x9C3A  /* Lynx Point LP */
-/*
- * MEI HW Section
- */
-
-/* MEI registers */
-/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
-#define H_CB_WW    0
-/* H_CSR - Host Control Status register */
-#define H_CSR      4
-/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
-#define ME_CB_RW   8
-/* ME_CSR_HA - ME Control Status Host Access register (read only) */
-#define ME_CSR_HA  0xC
-
-
-/* register bits of H_CSR (Host Control Status register) */
-/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
-#define H_CBD             0xFF000000
-/* Host Circular Buffer Write Pointer */
-#define H_CBWP            0x00FF0000
-/* Host Circular Buffer Read Pointer */
-#define H_CBRP            0x0000FF00
-/* Host Reset */
-#define H_RST             0x00000010
-/* Host Ready */
-#define H_RDY             0x00000008
-/* Host Interrupt Generate */
-#define H_IG              0x00000004
-/* Host Interrupt Status */
-#define H_IS              0x00000002
-/* Host Interrupt Enable */
-#define H_IE              0x00000001
-
-
-/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
-/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
-access to ME_CBD */
-#define ME_CBD_HRA        0xFF000000
-/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
-#define ME_CBWP_HRA       0x00FF0000
-/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
-#define ME_CBRP_HRA       0x0000FF00
-/* ME Reset HRA - host read only access to ME_RST */
-#define ME_RST_HRA        0x00000010
-/* ME Ready HRA - host read only access to ME_RDY */
-#define ME_RDY_HRA        0x00000008
-/* ME Interrupt Generate HRA - host read only access to ME_IG */
-#define ME_IG_HRA         0x00000004
-/* ME Interrupt Status HRA - host read only access to ME_IS */
-#define ME_IS_HRA         0x00000002
-/* ME Interrupt Enable HRA - host read only access to ME_IE */
-#define ME_IE_HRA         0x00000001
 
 /*
  * MEI Version
@@ -224,6 +121,22 @@ struct mei_bus_message {
        u8 data[0];
 } __packed;
 
+/**
+ * struct hbm_cl_cmd - client specific host bus command
+ *     CONNECT, DISCONNECT, and FlOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @me_addr - address of the client in ME
+ * @host_addr - address of the client in the driver
+ * @data
+ */
+struct mei_hbm_cl_cmd {
+       u8 hbm_cmd;
+       u8 me_addr;
+       u8 host_addr;
+       u8 data;
+};
+
 struct hbm_version {
        u8 minor_version;
        u8 major_version;
@@ -333,11 +246,5 @@ struct hbm_flow_control {
        u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
 } __packed;
 
-struct mei_me_client {
-       struct mei_client_properties props;
-       u8 client_id;
-       u8 mei_flow_ctrl_creds;
-} __packed;
-
 
 #endif
index a54cd55..6ec5301 100644 (file)
 #include <linux/wait.h>
 #include <linux/delay.h>
 
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
 #include <linux/mei.h>
 
+#include "mei_dev.h"
+#include "client.h"
+
 const char *mei_dev_state_str(int state)
 {
 #define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state)
 #undef MEI_DEV_STATE
 }
 
-
-
-/**
- * mei_io_list_flush - removes list entry belonging to cl.
- *
- * @list:  An instance of our list structure
- * @cl: private data of the file object
- */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
-{
-       struct mei_cl_cb *pos;
-       struct mei_cl_cb *next;
-
-       list_for_each_entry_safe(pos, next, &list->list, list) {
-               if (pos->cl) {
-                       if (mei_cl_cmp_id(cl, pos->cl))
-                               list_del(&pos->list);
-               }
-       }
-}
-/**
- * mei_cl_flush_queues - flushes queue lists belonging to cl.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- */
-int mei_cl_flush_queues(struct mei_cl *cl)
+void mei_device_init(struct mei_device *dev)
 {
-       if (!cl || !cl->dev)
-               return -EINVAL;
-
-       dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
-       mei_io_list_flush(&cl->dev->read_list, cl);
-       mei_io_list_flush(&cl->dev->write_list, cl);
-       mei_io_list_flush(&cl->dev->write_waiting_list, cl);
-       mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
-       mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
-       mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-       mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
-       return 0;
-}
-
-
-
-/**
- * init_mei_device - allocates and initializes the mei device structure
- *
- * @pdev: The pci device structure
- *
- * returns The mei_device_device pointer on success, NULL on failure.
- */
-struct mei_device *mei_device_init(struct pci_dev *pdev)
-{
-       struct mei_device *dev;
-
-       dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
-       if (!dev)
-               return NULL;
-
        /* setup our list array */
        INIT_LIST_HEAD(&dev->file_list);
-       INIT_LIST_HEAD(&dev->wd_cl.link);
-       INIT_LIST_HEAD(&dev->iamthif_cl.link);
        mutex_init(&dev->device_lock);
        init_waitqueue_head(&dev->wait_recvd_msg);
        init_waitqueue_head(&dev->wait_stop_wd);
        dev->dev_state = MEI_DEV_INITIALIZING;
-       dev->iamthif_state = MEI_IAMTHIF_IDLE;
 
        mei_io_list_init(&dev->read_list);
        mei_io_list_init(&dev->write_list);
        mei_io_list_init(&dev->write_waiting_list);
        mei_io_list_init(&dev->ctrl_wr_list);
        mei_io_list_init(&dev->ctrl_rd_list);
-       mei_io_list_init(&dev->amthif_cmd_list);
-       mei_io_list_init(&dev->amthif_rd_complete_list);
-       dev->pdev = pdev;
-       return dev;
 }
 
 /**
@@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
  */
 int mei_hw_init(struct mei_device *dev)
 {
-       int err = 0;
-       int ret;
+       int ret = 0;
 
        mutex_lock(&dev->device_lock);
 
-       dev->host_hw_state = mei_hcsr_read(dev);
-       dev->me_hw_state = mei_mecsr_read(dev);
-       dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
-           dev->host_hw_state, dev->me_hw_state);
-
        /* acknowledge interrupt and stop interupts */
-       if ((dev->host_hw_state & H_IS) == H_IS)
-               mei_reg_write(dev, H_CSR, dev->host_hw_state);
+       mei_clear_interrupts(dev);
 
-       /* Doesn't change in runtime */
-       dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
+       mei_hw_config(dev);
 
        dev->recvd_msg = false;
        dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
 
        mei_reset(dev, 1);
 
-       dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-           dev->host_hw_state, dev->me_hw_state);
-
        /* wait for ME to turn on ME_RDY */
        if (!dev->recvd_msg) {
                mutex_unlock(&dev->device_lock);
-               err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
+               ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
                        dev->recvd_msg,
                        mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
                mutex_lock(&dev->device_lock);
        }
 
-       if (err <= 0 && !dev->recvd_msg) {
+       if (ret <= 0 && !dev->recvd_msg) {
                dev->dev_state = MEI_DEV_DISABLED;
                dev_dbg(&dev->pdev->dev,
                        "wait_event_interruptible_timeout failed"
                        "on wait for ME to turn on ME_RDY.\n");
-               ret = -ENODEV;
-               goto out;
+               goto err;
        }
 
-       if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
-             ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
-               dev->dev_state = MEI_DEV_DISABLED;
-               dev_dbg(&dev->pdev->dev,
-                       "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-                       dev->host_hw_state, dev->me_hw_state);
-
-               if (!(dev->host_hw_state & H_RDY))
-                       dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
 
-               if (!(dev->me_hw_state & ME_RDY_HRA))
-                       dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
+       if (!mei_host_is_ready(dev)) {
+               dev_err(&dev->pdev->dev, "host is not ready.\n");
+               goto err;
+       }
 
-               dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
-               ret = -ENODEV;
-               goto out;
+       if (!mei_hw_is_ready(dev)) {
+               dev_err(&dev->pdev->dev, "ME is not ready.\n");
+               goto err;
        }
 
        if (dev->version.major_version != HBM_MAJOR_VERSION ||
            dev->version.minor_version != HBM_MINOR_VERSION) {
                dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
-               ret = -ENODEV;
-               goto out;
+               goto err;
        }
 
        dev->recvd_msg = false;
-       dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-           dev->host_hw_state, dev->me_hw_state);
-       dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
        dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
-       dev_dbg(&dev->pdev->dev, "MEI  start success.\n");
-       ret = 0;
 
-out:
        mutex_unlock(&dev->device_lock);
-       return ret;
-}
-
-/**
- * mei_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
- */
-static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
-{
-       dev->host_hw_state |= (H_RST | H_IG);
-
-       if (interrupts_enabled)
-               mei_enable_interrupts(dev);
-       else
-               mei_disable_interrupts(dev);
+       return 0;
+err:
+       dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
+       dev->dev_state = MEI_DEV_DISABLED;
+       mutex_unlock(&dev->device_lock);
+       return -ENODEV;
 }
 
 /**
@@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
  */
 void mei_reset(struct mei_device *dev, int interrupts_enabled)
 {
-       struct mei_cl *cl_pos = NULL;
-       struct mei_cl *cl_next = NULL;
-       struct mei_cl_cb *cb_pos = NULL;
-       struct mei_cl_cb *cb_next = NULL;
        bool unexpected;
 
-       if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-               dev->need_reset = true;
+       if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
                return;
-       }
 
        unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
                        dev->dev_state != MEI_DEV_DISABLED &&
                        dev->dev_state != MEI_DEV_POWER_DOWN &&
                        dev->dev_state != MEI_DEV_POWER_UP);
 
-       dev->host_hw_state = mei_hcsr_read(dev);
-
-       dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
-           dev->host_hw_state);
-
        mei_hw_reset(dev, interrupts_enabled);
 
-       dev->host_hw_state &= ~H_RST;
-       dev->host_hw_state |= H_IG;
-
-       mei_hcsr_set(dev);
-
-       dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
-           dev->host_hw_state);
-
-       dev->need_reset = false;
 
        if (dev->dev_state != MEI_DEV_INITIALIZING) {
                if (dev->dev_state != MEI_DEV_DISABLED &&
                    dev->dev_state != MEI_DEV_POWER_DOWN)
                        dev->dev_state = MEI_DEV_RESETING;
 
-               list_for_each_entry_safe(cl_pos,
-                               cl_next, &dev->file_list, link) {
-                       cl_pos->state = MEI_FILE_DISCONNECTED;
-                       cl_pos->mei_flow_ctrl_creds = 0;
-                       cl_pos->read_cb = NULL;
-                       cl_pos->timer_count = 0;
-               }
+               mei_cl_all_disconnect(dev);
+
                /* remove entry if already in list */
                dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
-               mei_me_cl_unlink(dev, &dev->wd_cl);
-
-               mei_me_cl_unlink(dev, &dev->iamthif_cl);
+               mei_cl_unlink(&dev->wd_cl);
+               if (dev->open_handle_count > 0)
+                       dev->open_handle_count--;
+               mei_cl_unlink(&dev->iamthif_cl);
+               if (dev->open_handle_count > 0)
+                       dev->open_handle_count--;
 
                mei_amthif_reset_params(dev);
                memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
        dev->rd_msg_hdr = 0;
        dev->wd_pending = false;
 
-       /* update the state of the registers after reset */
-       dev->host_hw_state = mei_hcsr_read(dev);
-       dev->me_hw_state = mei_mecsr_read(dev);
-
-       dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-           dev->host_hw_state, dev->me_hw_state);
-
        if (unexpected)
                dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
                         mei_dev_state_str(dev->dev_state));
 
-       /* Wake up all readings so they can be interrupted */
-       list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
-               if (waitqueue_active(&cl_pos->rx_wait)) {
-                       dev_dbg(&dev->pdev->dev, "Waking up client!\n");
-                       wake_up_interruptible(&cl_pos->rx_wait);
-               }
-       }
-       /* remove all waiting requests */
-       list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
-               list_del(&cb_pos->list);
-               mei_io_cb_free(cb_pos);
-       }
-}
-
-
-
-/**
- * host_start_message - mei host sends start message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_start_message(struct mei_device *dev)
-{
-       struct mei_msg_hdr *mei_hdr;
-       struct hbm_host_version_request *start_req;
-       const size_t len = sizeof(struct hbm_host_version_request);
-
-       mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-       /* host start message */
-       start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
-       memset(start_req, 0, len);
-       start_req->hbm_cmd = HOST_START_REQ_CMD;
-       start_req->host_version.major_version = HBM_MAJOR_VERSION;
-       start_req->host_version.minor_version = HBM_MINOR_VERSION;
-
-       dev->recvd_msg = false;
-       if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
-               dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
-               dev->dev_state = MEI_DEV_RESETING;
-               mei_reset(dev, 1);
-       }
-       dev->init_clients_state = MEI_START_MESSAGE;
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
-       return ;
-}
-
-/**
- * host_enum_clients_message - host sends enumeration client request message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_enum_clients_message(struct mei_device *dev)
-{
-       struct mei_msg_hdr *mei_hdr;
-       struct hbm_host_enum_request *enum_req;
-       const size_t len = sizeof(struct hbm_host_enum_request);
-       /* enumerate clients */
-       mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-       enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
-       memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
-       enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-
-       if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
-               dev->dev_state = MEI_DEV_RESETING;
-               dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
-               mei_reset(dev, 1);
-       }
-       dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
-       return;
-}
-
-
-/**
- * allocate_me_clients_storage - allocates storage for me clients
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_allocate_me_clients_storage(struct mei_device *dev)
-{
-       struct mei_me_client *clients;
-       int b;
-
-       /* count how many ME clients we have */
-       for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
-               dev->me_clients_num++;
-
-       if (dev->me_clients_num <= 0)
-               return ;
-
-
-       if (dev->me_clients != NULL) {
-               kfree(dev->me_clients);
-               dev->me_clients = NULL;
-       }
-       dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
-               dev->me_clients_num * sizeof(struct mei_me_client));
-       /* allocate storage for ME clients representation */
-       clients = kcalloc(dev->me_clients_num,
-                       sizeof(struct mei_me_client), GFP_KERNEL);
-       if (!clients) {
-               dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
-               dev->dev_state = MEI_DEV_RESETING;
-               mei_reset(dev, 1);
-               return ;
-       }
-       dev->me_clients = clients;
-       return ;
-}
-
-void mei_host_client_init(struct work_struct *work)
-{
-       struct mei_device *dev = container_of(work,
-                                             struct mei_device, init_work);
-       struct mei_client_properties *client_props;
-       int i;
-
-       mutex_lock(&dev->device_lock);
-
-       bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
-       dev->open_handle_count = 0;
-
-       /*
-        * Reserving the first three client IDs
-        * 0: Reserved for MEI Bus Message communications
-        * 1: Reserved for Watchdog
-        * 2: Reserved for AMTHI
-        */
-       bitmap_set(dev->host_clients_map, 0, 3);
-
-       for (i = 0; i < dev->me_clients_num; i++) {
-               client_props = &dev->me_clients[i].props;
-
-               if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
-                       mei_amthif_host_init(dev);
-               else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
-                       mei_wd_host_init(dev);
-       }
-
-       dev->dev_state = MEI_DEV_ENABLED;
-
-       mutex_unlock(&dev->device_lock);
-}
-
-int mei_host_client_enumerate(struct mei_device *dev)
-{
-
-       struct mei_msg_hdr *mei_hdr;
-       struct hbm_props_request *prop_req;
-       const size_t len = sizeof(struct hbm_props_request);
-       unsigned long next_client_index;
-       u8 client_num;
-
-
-       client_num = dev->me_client_presentation_num;
-
-       next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
-                                         dev->me_client_index);
-
-       /* We got all client properties */
-       if (next_client_index == MEI_CLIENTS_MAX) {
-               schedule_work(&dev->init_work);
-
-               return 0;
-       }
-
-       dev->me_clients[client_num].client_id = next_client_index;
-       dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
-
-       mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-       prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
-
-       memset(prop_req, 0, sizeof(struct hbm_props_request));
-
-
-       prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
-       prop_req->address = next_client_index;
-
-       if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
-                             mei_hdr->length)) {
-               dev->dev_state = MEI_DEV_RESETING;
-               dev_err(&dev->pdev->dev, "Properties request command failed\n");
-               mei_reset(dev, 1);
-
-               return -EIO;
-       }
-
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
-       dev->me_client_index = next_client_index;
-
-       return 0;
-}
-
-/**
- * mei_init_file_private - initializes private file structure.
- *
- * @priv: private file structure to be initialized
- * @file: the file structure
- */
-void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
-{
-       memset(priv, 0, sizeof(struct mei_cl));
-       init_waitqueue_head(&priv->wait);
-       init_waitqueue_head(&priv->rx_wait);
-       init_waitqueue_head(&priv->tx_wait);
-       INIT_LIST_HEAD(&priv->link);
-       priv->reading_state = MEI_IDLE;
-       priv->writing_state = MEI_IDLE;
-       priv->dev = dev;
-}
-
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
-{
-       int i, res = -ENOENT;
-
-       for (i = 0; i < dev->me_clients_num; ++i)
-               if (uuid_le_cmp(*cuuid,
-                               dev->me_clients[i].props.protocol_name) == 0) {
-                       res = i;
-                       break;
-               }
-
-       return res;
-}
-
-
-/**
- * mei_me_cl_link - create link between host and me clinet and add
- *   me_cl to the list
- *
- * @dev: the device structure
- * @cl: link between me and host client assocated with opened file descriptor
- * @cuuid: uuid of ME client
- * @client_id: id of the host client
- *
- * returns ME client index if ME client
- *     -EINVAL on incorrect values
- *     -ENONET if client not found
- */
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
-                       const uuid_le *cuuid, u8 host_cl_id)
-{
-       int i;
-
-       if (!dev || !cl || !cuuid)
-               return -EINVAL;
-
-       /* check for valid client id */
-       i = mei_me_cl_by_uuid(dev, cuuid);
-       if (i >= 0) {
-               cl->me_client_id = dev->me_clients[i].client_id;
-               cl->state = MEI_FILE_CONNECTING;
-               cl->host_client_id = host_cl_id;
-
-               list_add_tail(&cl->link, &dev->file_list);
-               return (u8)i;
-       }
-
-       return -ENOENT;
-}
-/**
- * mei_me_cl_unlink - remove me_cl from the list
- *
- * @dev: the device structure
- * @host_client_id: host client id to be removed
- */
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
-{
-       struct mei_cl *pos, *next;
-       list_for_each_entry_safe(pos, next, &dev->file_list, link) {
-               if (cl->host_client_id == pos->host_client_id) {
-                       dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
-                                       pos->host_client_id, pos->me_client_id);
-                       list_del_init(&pos->link);
-                       break;
-               }
-       }
-}
+       /* wake up all readings so they can be interrupted */
+       mei_cl_all_read_wakeup(dev);
 
-/**
- * mei_alloc_file_private - allocates a private file structure and sets it up.
- * @file: the file structure
- *
- * returns  The allocated file or NULL on failure
- */
-struct mei_cl *mei_cl_allocate(struct mei_device *dev)
-{
-       struct mei_cl *cl;
-
-       cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
-       if (!cl)
-               return NULL;
-
-       mei_cl_init(cl, dev);
-
-       return cl;
+       /* remove all waiting requests */
+       mei_cl_all_write_clear(dev);
 }
 
 
 
-/**
- * mei_disconnect_host_client - sends disconnect message to fw from host client.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
-{
-       struct mei_cl_cb *cb;
-       int rets, err;
-
-       if (!dev || !cl)
-               return -ENODEV;
-
-       if (cl->state != MEI_FILE_DISCONNECTING)
-               return 0;
-
-       cb = mei_io_cb_init(cl, NULL);
-       if (!cb)
-               return -ENOMEM;
-
-       cb->fop_type = MEI_FOP_CLOSE;
-       if (dev->mei_host_buffer_is_empty) {
-               dev->mei_host_buffer_is_empty = false;
-               if (mei_disconnect(dev, cl)) {
-                       rets = -ENODEV;
-                       dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
-                       goto free;
-               }
-               mdelay(10); /* Wait for hardware disconnection ready */
-               list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
-       } else {
-               dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
-               list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
-       }
-       mutex_unlock(&dev->device_lock);
-
-       err = wait_event_timeout(dev->wait_recvd_msg,
-                       MEI_FILE_DISCONNECTED == cl->state,
-                       mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
-
-       mutex_lock(&dev->device_lock);
-       if (MEI_FILE_DISCONNECTED == cl->state) {
-               rets = 0;
-               dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
-       } else {
-               rets = -ENODEV;
-               if (MEI_FILE_DISCONNECTED != cl->state)
-                       dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
-
-               if (err)
-                       dev_dbg(&dev->pdev->dev,
-                                       "wait failed disconnect err=%08x\n",
-                                       err);
-
-               dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
-       }
-
-       mei_io_list_flush(&dev->ctrl_rd_list, cl);
-       mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
-       mei_io_cb_free(cb);
-       return rets;
-}
 
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
deleted file mode 100644 (file)
index 8de8547..0000000
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/pci.h>
-#include "mei_dev.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-
-
-/**
- * mei_set_csr_register - writes H_CSR register to the mei device,
- * and ignores the H_IS bit for it is write-one-to-zero.
- *
- * @dev: the device structure
- */
-void mei_hcsr_set(struct mei_device *dev)
-{
-       if ((dev->host_hw_state & H_IS) == H_IS)
-               dev->host_hw_state &= ~H_IS;
-       mei_reg_write(dev, H_CSR, dev->host_hw_state);
-       dev->host_hw_state = mei_hcsr_read(dev);
-}
-
-/**
- * mei_csr_enable_interrupts - enables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_enable_interrupts(struct mei_device *dev)
-{
-       dev->host_hw_state |= H_IE;
-       mei_hcsr_set(dev);
-}
-
-/**
- * mei_csr_disable_interrupts - disables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_disable_interrupts(struct mei_device *dev)
-{
-       dev->host_hw_state &= ~H_IE;
-       mei_hcsr_set(dev);
-}
-
-/**
- * mei_hbuf_filled_slots - gets number of device filled buffer slots
- *
- * @device: the device structure
- *
- * returns number of filled slots
- */
-static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
-{
-       char read_ptr, write_ptr;
-
-       dev->host_hw_state = mei_hcsr_read(dev);
-
-       read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
-       write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
-
-       return (unsigned char) (write_ptr - read_ptr);
-}
-
-/**
- * mei_hbuf_is_empty - checks if host buffer is empty.
- *
- * @dev: the device structure
- *
- * returns true if empty, false - otherwise.
- */
-bool mei_hbuf_is_empty(struct mei_device *dev)
-{
-       return mei_hbuf_filled_slots(dev) == 0;
-}
-
-/**
- * mei_hbuf_empty_slots - counts write empty slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
- */
-int mei_hbuf_empty_slots(struct mei_device *dev)
-{
-       unsigned char filled_slots, empty_slots;
-
-       filled_slots = mei_hbuf_filled_slots(dev);
-       empty_slots = dev->hbuf_depth - filled_slots;
-
-       /* check for overflow */
-       if (filled_slots > dev->hbuf_depth)
-               return -EOVERFLOW;
-
-       return empty_slots;
-}
-
-/**
- * mei_write_message - writes a message to mei device.
- *
- * @dev: the device structure
- * @header: header of message
- * @write_buffer: message buffer will be written
- * @write_length: message size will be written
- *
- * This function returns -EIO if write has failed
- */
-int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
-                     unsigned char *buf, unsigned long length)
-{
-       unsigned long rem, dw_cnt;
-       u32 *reg_buf = (u32 *)buf;
-       int i;
-       int empty_slots;
-
-
-       dev_dbg(&dev->pdev->dev,
-                       "mei_write_message header=%08x.\n",
-                       *((u32 *) header));
-
-       empty_slots = mei_hbuf_empty_slots(dev);
-       dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
-
-       dw_cnt = mei_data2slots(length);
-       if (empty_slots < 0 || dw_cnt > empty_slots)
-               return -EIO;
-
-       mei_reg_write(dev, H_CB_WW, *((u32 *) header));
-
-       for (i = 0; i < length / 4; i++)
-               mei_reg_write(dev, H_CB_WW, reg_buf[i]);
-
-       rem = length & 0x3;
-       if (rem > 0) {
-               u32 reg = 0;
-               memcpy(&reg, &buf[length - rem], rem);
-               mei_reg_write(dev, H_CB_WW, reg);
-       }
-
-       dev->host_hw_state = mei_hcsr_read(dev);
-       dev->host_hw_state |= H_IG;
-       mei_hcsr_set(dev);
-       dev->me_hw_state = mei_mecsr_read(dev);
-       if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
-               return -EIO;
-
-       return 0;
-}
-
-/**
- * mei_count_full_read_slots - counts read full slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
- */
-int mei_count_full_read_slots(struct mei_device *dev)
-{
-       char read_ptr, write_ptr;
-       unsigned char buffer_depth, filled_slots;
-
-       dev->me_hw_state = mei_mecsr_read(dev);
-       buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
-       read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
-       write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
-       filled_slots = (unsigned char) (write_ptr - read_ptr);
-
-       /* check for overflow */
-       if (filled_slots > buffer_depth)
-               return -EOVERFLOW;
-
-       dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
-       return (int)filled_slots;
-}
-
-/**
- * mei_read_slots - reads a message from mei device.
- *
- * @dev: the device structure
- * @buffer: message buffer will be written
- * @buffer_length: message size will be read
- */
-void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
-                   unsigned long buffer_length)
-{
-       u32 *reg_buf = (u32 *)buffer;
-
-       for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
-               *reg_buf++ = mei_mecbrw_read(dev);
-
-       if (buffer_length > 0) {
-               u32 reg = mei_mecbrw_read(dev);
-               memcpy(reg_buf, &reg, buffer_length);
-       }
-
-       dev->host_hw_state |= H_IG;
-       mei_hcsr_set(dev);
-}
-
-/**
- * mei_flow_ctrl_creds - checks flow_control credentials.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
- *     -ENOENT if mei_cl is not present
- *     -EINVAL if single_recv_buf == 0
- */
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
-{
-       int i;
-
-       if (!dev->me_clients_num)
-               return 0;
-
-       if (cl->mei_flow_ctrl_creds > 0)
-               return 1;
-
-       for (i = 0; i < dev->me_clients_num; i++) {
-               struct mei_me_client  *me_cl = &dev->me_clients[i];
-               if (me_cl->client_id == cl->me_client_id) {
-                       if (me_cl->mei_flow_ctrl_creds) {
-                               if (WARN_ON(me_cl->props.single_recv_buf == 0))
-                                       return -EINVAL;
-                               return 1;
-                       } else {
-                               return 0;
-                       }
-               }
-       }
-       return -ENOENT;
-}
-
-/**
- * mei_flow_ctrl_reduce - reduces flow_control.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- * @returns
- *     0 on success
- *     -ENOENT when me client is not found
- *     -EINVAL when ctrl credits are <= 0
- */
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
-{
-       int i;
-
-       if (!dev->me_clients_num)
-               return -ENOENT;
-
-       for (i = 0; i < dev->me_clients_num; i++) {
-               struct mei_me_client  *me_cl = &dev->me_clients[i];
-               if (me_cl->client_id == cl->me_client_id) {
-                       if (me_cl->props.single_recv_buf != 0) {
-                               if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
-                                       return -EINVAL;
-                               dev->me_clients[i].mei_flow_ctrl_creds--;
-                       } else {
-                               if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
-                                       return -EINVAL;
-                               cl->mei_flow_ctrl_creds--;
-                       }
-                       return 0;
-               }
-       }
-       return -ENOENT;
-}
-
-/**
- * mei_send_flow_control - sends flow control to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
-{
-       struct mei_msg_hdr *mei_hdr;
-       struct hbm_flow_control *flow_ctrl;
-       const size_t len = sizeof(struct hbm_flow_control);
-
-       mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-       flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
-       memset(flow_ctrl, 0, len);
-       flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
-       flow_ctrl->host_addr = cl->host_client_id;
-       flow_ctrl->me_addr = cl->me_client_id;
-       /* FIXME: reserved !? */
-       memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
-       dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
-               cl->host_client_id, cl->me_client_id);
-
-       return mei_write_message(dev, mei_hdr,
-                       (unsigned char *) flow_ctrl, len);
-}
-
-/**
- * mei_other_client_is_connecting - checks if other
- *    client with the same client id is connected.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if other client is connected, 0 - otherwise.
- */
-int mei_other_client_is_connecting(struct mei_device *dev,
-                               struct mei_cl *cl)
-{
-       struct mei_cl *cl_pos = NULL;
-       struct mei_cl *cl_next = NULL;
-
-       list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
-               if ((cl_pos->state == MEI_FILE_CONNECTING) &&
-                       (cl_pos != cl) &&
-                       cl->me_client_id == cl_pos->me_client_id)
-                       return 1;
-
-       }
-       return 0;
-}
-
-/**
- * mei_disconnect - sends disconnect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
-{
-       struct mei_msg_hdr *mei_hdr;
-       struct hbm_client_connect_request *req;
-       const size_t len = sizeof(struct hbm_client_connect_request);
-
-       mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-       req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
-       memset(req, 0, len);
-       req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
-       req->host_addr = cl->host_client_id;
-       req->me_addr = cl->me_client_id;
-       req->reserved = 0;
-
-       return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
-}
-
-/**
- * mei_connect - sends connect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_connect(struct mei_device *dev, struct mei_cl *cl)
-{
-       struct mei_msg_hdr *mei_hdr;
-       struct hbm_client_connect_request *req;
-       const size_t len = sizeof(struct hbm_client_connect_request);
-
-       mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-       req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
-       req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
-       req->host_addr = cl->host_client_id;
-       req->me_addr = cl->me_client_id;
-       req->reserved = 0;
-
-       return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
-}
diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h
deleted file mode 100644 (file)
index ec6c785..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-
-
-#ifndef _MEI_INTERFACE_H_
-#define _MEI_INTERFACE_H_
-
-#include <linux/mei.h>
-#include "mei_dev.h"
-
-
-
-void mei_read_slots(struct mei_device *dev,
-                    unsigned char *buffer,
-                    unsigned long buffer_length);
-
-int mei_write_message(struct mei_device *dev,
-                            struct mei_msg_hdr *header,
-                            unsigned char *write_buffer,
-                            unsigned long write_length);
-
-bool mei_hbuf_is_empty(struct mei_device *dev);
-
-int mei_hbuf_empty_slots(struct mei_device *dev);
-
-static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
-{
-       return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
-
-/* get slots (dwords) from a message length + header (bytes) */
-static inline unsigned char mei_data2slots(size_t length)
-{
-       return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
-}
-
-int mei_count_full_read_slots(struct mei_device *dev);
-
-
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
-
-
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
-/*
- * mei_watchdog_register  - Registering watchdog interface
- *   once we got connection to the WD Client
- * @dev - mei device
- */
-void mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister  - Unregistering watchdog interface
- * @dev - mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
-int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
-int mei_connect(struct mei_device *dev, struct mei_cl *cl);
-
-#endif /* _MEI_INTERFACE_H_ */
index 04fa213..3535b26 100644 (file)
 #include <linux/fs.h>
 #include <linux/jiffies.h>
 
-#include "mei_dev.h"
 #include <linux/mei.h>
-#include "hw.h"
-#include "interface.h"
-
-
-/**
- * mei_interrupt_quick_handler - The ISR of the MEI device
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
-{
-       struct mei_device *dev = (struct mei_device *) dev_id;
-       u32 csr_reg = mei_hcsr_read(dev);
-
-       if ((csr_reg & H_IS) != H_IS)
-               return IRQ_NONE;
 
-       /* clear H_IS bit in H_CSR */
-       mei_reg_write(dev, H_CSR, csr_reg);
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
 
-       return IRQ_WAKE_THREAD;
-}
 
 /**
- * _mei_cmpl - processes completed operation.
+ * mei_complete_handler - processes completed operation.
  *
  * @cl: private data of the file object.
  * @cb_pos: callback block.
  */
-static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
 {
        if (cb_pos->fop_type == MEI_FOP_WRITE) {
                mei_io_cb_free(cb_pos);
@@ -150,8 +130,8 @@ quit:
        dev_dbg(&dev->pdev->dev, "message read\n");
        if (!buffer) {
                mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
-               dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
-                               *(u32 *) dev->rd_msg_buf);
+               dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
+                               MEI_HDR_PRM(mei_hdr));
        }
 
        return 0;
@@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
 
        *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
 
-       if (mei_disconnect(dev, cl)) {
+       if (mei_hbm_cl_disconnect_req(dev, cl)) {
                cl->status = 0;
                cb_pos->buf_idx = 0;
                list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
        return 0;
 }
 
-/**
- * is_treat_specially_client - checks if the message belongs
- * to the file private data.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
-               struct hbm_client_connect_response *rs)
-{
-
-       if (cl->host_client_id == rs->host_addr &&
-           cl->me_client_id == rs->me_addr) {
-               if (!rs->status) {
-                       cl->state = MEI_FILE_CONNECTED;
-                       cl->status = 0;
-
-               } else {
-                       cl->state = MEI_FILE_DISCONNECTED;
-                       cl->status = -ENODEV;
-               }
-               cl->timer_count = 0;
-
-               return true;
-       }
-       return false;
-}
-
-/**
- * mei_client_connect_response - connects to response irq routine
- *
- * @dev: the device structure
- * @rs: connect response bus message
- */
-static void mei_client_connect_response(struct mei_device *dev,
-               struct hbm_client_connect_response *rs)
-{
-
-       struct mei_cl *cl;
-       struct mei_cl_cb *pos = NULL, *next = NULL;
-
-       dev_dbg(&dev->pdev->dev,
-                       "connect_response:\n"
-                       "ME Client = %d\n"
-                       "Host Client = %d\n"
-                       "Status = %d\n",
-                       rs->me_addr,
-                       rs->host_addr,
-                       rs->status);
-
-       /* if WD or iamthif client treat specially */
-
-       if (is_treat_specially_client(&(dev->wd_cl), rs)) {
-               dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
-               mei_watchdog_register(dev);
-
-               return;
-       }
-
-       if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
-               dev->iamthif_state = MEI_IAMTHIF_IDLE;
-               return;
-       }
-       list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-
-               cl = pos->cl;
-               if (!cl) {
-                       list_del(&pos->list);
-                       return;
-               }
-               if (pos->fop_type == MEI_FOP_IOCTL) {
-                       if (is_treat_specially_client(cl, rs)) {
-                               list_del(&pos->list);
-                               cl->status = 0;
-                               cl->timer_count = 0;
-                               break;
-                       }
-               }
-       }
-}
-
-/**
- * mei_client_disconnect_response - disconnects from response irq routine
- *
- * @dev: the device structure
- * @rs: disconnect response bus message
- */
-static void mei_client_disconnect_response(struct mei_device *dev,
-                                       struct hbm_client_connect_response *rs)
-{
-       struct mei_cl *cl;
-       struct mei_cl_cb *pos = NULL, *next = NULL;
-
-       dev_dbg(&dev->pdev->dev,
-                       "disconnect_response:\n"
-                       "ME Client = %d\n"
-                       "Host Client = %d\n"
-                       "Status = %d\n",
-                       rs->me_addr,
-                       rs->host_addr,
-                       rs->status);
-
-       list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-               cl = pos->cl;
-
-               if (!cl) {
-                       list_del(&pos->list);
-                       return;
-               }
-
-               dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
-               if (cl->host_client_id == rs->host_addr &&
-                   cl->me_client_id == rs->me_addr) {
-
-                       list_del(&pos->list);
-                       if (!rs->status)
-                               cl->state = MEI_FILE_DISCONNECTED;
-
-                       cl->status = 0;
-                       cl->timer_count = 0;
-                       break;
-               }
-       }
-}
-
-/**
- * same_flow_addr - tells if they have the same address.
- *
- * @file: private data of the file object.
- * @flow: flow control.
- *
- * returns  !=0, same; 0,not.
- */
-static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
-{
-       return (cl->host_client_id == flow->host_addr &&
-               cl->me_client_id == flow->me_addr);
-}
-
-/**
- * add_single_flow_creds - adds single buffer credentials.
- *
- * @file: private data ot the file object.
- * @flow: flow control.
- */
-static void add_single_flow_creds(struct mei_device *dev,
-                                 struct hbm_flow_control *flow)
-{
-       struct mei_me_client *client;
-       int i;
-
-       for (i = 0; i < dev->me_clients_num; i++) {
-               client = &dev->me_clients[i];
-               if (client && flow->me_addr == client->client_id) {
-                       if (client->props.single_recv_buf) {
-                               client->mei_flow_ctrl_creds++;
-                               dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
-                                   flow->me_addr);
-                               dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
-                                   client->mei_flow_ctrl_creds);
-                       } else {
-                               BUG();  /* error in flow control */
-                       }
-               }
-       }
-}
-
-/**
- * mei_client_flow_control_response - flow control response irq routine
- *
- * @dev: the device structure
- * @flow_control: flow control response bus message
- */
-static void mei_client_flow_control_response(struct mei_device *dev,
-               struct hbm_flow_control *flow_control)
-{
-       struct mei_cl *cl_pos = NULL;
-       struct mei_cl *cl_next = NULL;
-
-       if (!flow_control->host_addr) {
-               /* single receive buffer */
-               add_single_flow_creds(dev, flow_control);
-       } else {
-               /* normal connection */
-               list_for_each_entry_safe(cl_pos, cl_next,
-                               &dev->file_list, link) {
-                       dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
-
-                       dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
-                           cl_pos->host_client_id,
-                           cl_pos->me_client_id);
-                       dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
-                           flow_control->host_addr,
-                           flow_control->me_addr);
-                       if (same_flow_addr(cl_pos, flow_control)) {
-                               dev_dbg(&dev->pdev->dev, "recv ctrl msg for host  %d ME %d.\n",
-                                   flow_control->host_addr,
-                                   flow_control->me_addr);
-                               cl_pos->mei_flow_ctrl_creds++;
-                               dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
-                                   cl_pos->mei_flow_ctrl_creds);
-                               break;
-                       }
-               }
-       }
-}
-
-/**
- * same_disconn_addr - tells if they have the same address
- *
- * @file: private data of the file object.
- * @disconn: disconnection request.
- *
- * returns !=0, same; 0,not.
- */
-static int same_disconn_addr(struct mei_cl *cl,
-                            struct hbm_client_connect_request *req)
-{
-       return (cl->host_client_id == req->host_addr &&
-               cl->me_client_id == req->me_addr);
-}
-
-/**
- * mei_client_disconnect_request - disconnects from request irq routine
- *
- * @dev: the device structure.
- * @disconnect_req: disconnect request bus message.
- */
-static void mei_client_disconnect_request(struct mei_device *dev,
-               struct hbm_client_connect_request *disconnect_req)
-{
-       struct hbm_client_connect_response *disconnect_res;
-       struct mei_cl *pos, *next;
-       const size_t len = sizeof(struct hbm_client_connect_response);
-
-       list_for_each_entry_safe(pos, next, &dev->file_list, link) {
-               if (same_disconn_addr(pos, disconnect_req)) {
-                       dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
-                                       disconnect_req->host_addr,
-                                       disconnect_req->me_addr);
-                       pos->state = MEI_FILE_DISCONNECTED;
-                       pos->timer_count = 0;
-                       if (pos == &dev->wd_cl)
-                               dev->wd_pending = false;
-                       else if (pos == &dev->iamthif_cl)
-                               dev->iamthif_timer = 0;
-
-                       /* prepare disconnect response */
-                       (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
-                       disconnect_res =
-                               (struct hbm_client_connect_response *)
-                               &dev->wr_ext_msg.data;
-                       disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
-                       disconnect_res->host_addr = pos->host_client_id;
-                       disconnect_res->me_addr = pos->me_client_id;
-                       disconnect_res->status = 0;
-                       break;
-               }
-       }
-}
-
-/**
- * mei_irq_thread_read_bus_message - bottom half read routine after ISR to
- * handle the read bus message cmd processing.
- *
- * @dev: the device structure
- * @mei_hdr: header of bus message
- */
-static void mei_irq_thread_read_bus_message(struct mei_device *dev,
-               struct mei_msg_hdr *mei_hdr)
-{
-       struct mei_bus_message *mei_msg;
-       struct mei_me_client *me_client;
-       struct hbm_host_version_response *version_res;
-       struct hbm_client_connect_response *connect_res;
-       struct hbm_client_connect_response *disconnect_res;
-       struct hbm_client_connect_request *disconnect_req;
-       struct hbm_flow_control *flow_control;
-       struct hbm_props_response *props_res;
-       struct hbm_host_enum_response *enum_res;
-       struct hbm_host_stop_request *stop_req;
-
-       /* read the message to our buffer */
-       BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
-       mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
-       mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
-
-       switch (mei_msg->hbm_cmd) {
-       case HOST_START_RES_CMD:
-               version_res = (struct hbm_host_version_response *) mei_msg;
-               if (version_res->host_version_supported) {
-                       dev->version.major_version = HBM_MAJOR_VERSION;
-                       dev->version.minor_version = HBM_MINOR_VERSION;
-                       if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
-                           dev->init_clients_state == MEI_START_MESSAGE) {
-                               dev->init_clients_timer = 0;
-                               mei_host_enum_clients_message(dev);
-                       } else {
-                               dev->recvd_msg = false;
-                               dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
-                               mei_reset(dev, 1);
-                               return;
-                       }
-               } else {
-                       u32 *buf = dev->wr_msg_buf;
-                       const size_t len = sizeof(struct hbm_host_stop_request);
-
-                       dev->version = version_res->me_max_version;
-
-                       /* send stop message */
-                       mei_hdr = mei_hbm_hdr(&buf[0], len);
-                       stop_req = (struct hbm_host_stop_request *)&buf[1];
-                       memset(stop_req, 0, len);
-                       stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
-                       stop_req->reason = DRIVER_STOP_REQUEST;
-
-                       mei_write_message(dev, mei_hdr,
-                                       (unsigned char *)stop_req, len);
-                       dev_dbg(&dev->pdev->dev, "version mismatch.\n");
-                       return;
-               }
-
-               dev->recvd_msg = true;
-               dev_dbg(&dev->pdev->dev, "host start response message received.\n");
-               break;
-
-       case CLIENT_CONNECT_RES_CMD:
-               connect_res = (struct hbm_client_connect_response *) mei_msg;
-               mei_client_connect_response(dev, connect_res);
-               dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
-               wake_up(&dev->wait_recvd_msg);
-               break;
-
-       case CLIENT_DISCONNECT_RES_CMD:
-               disconnect_res = (struct hbm_client_connect_response *) mei_msg;
-               mei_client_disconnect_response(dev, disconnect_res);
-               dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
-               wake_up(&dev->wait_recvd_msg);
-               break;
-
-       case MEI_FLOW_CONTROL_CMD:
-               flow_control = (struct hbm_flow_control *) mei_msg;
-               mei_client_flow_control_response(dev, flow_control);
-               dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
-               break;
-
-       case HOST_CLIENT_PROPERTIES_RES_CMD:
-               props_res = (struct hbm_props_response *)mei_msg;
-               me_client = &dev->me_clients[dev->me_client_presentation_num];
-
-               if (props_res->status || !dev->me_clients) {
-                       dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
-                       mei_reset(dev, 1);
-                       return;
-               }
-
-               if (me_client->client_id != props_res->address) {
-                       dev_err(&dev->pdev->dev,
-                               "Host client properties reply mismatch\n");
-                       mei_reset(dev, 1);
-
-                       return;
-               }
-
-               if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
-                   dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
-                       dev_err(&dev->pdev->dev,
-                               "Unexpected client properties reply\n");
-                       mei_reset(dev, 1);
-
-                       return;
-               }
-
-               me_client->props = props_res->client_properties;
-               dev->me_client_index++;
-               dev->me_client_presentation_num++;
-
-               mei_host_client_enumerate(dev);
-
-               break;
-
-       case HOST_ENUM_RES_CMD:
-               enum_res = (struct hbm_host_enum_response *) mei_msg;
-               memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
-               if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
-                   dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
-                               dev->init_clients_timer = 0;
-                               dev->me_client_presentation_num = 0;
-                               dev->me_client_index = 0;
-                               mei_allocate_me_clients_storage(dev);
-                               dev->init_clients_state =
-                                       MEI_CLIENT_PROPERTIES_MESSAGE;
-
-                               mei_host_client_enumerate(dev);
-               } else {
-                       dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
-                       mei_reset(dev, 1);
-                       return;
-               }
-               break;
-
-       case HOST_STOP_RES_CMD:
-               dev->dev_state = MEI_DEV_DISABLED;
-               dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
-               mei_reset(dev, 1);
-               break;
-
-       case CLIENT_DISCONNECT_REQ_CMD:
-               /* search for client */
-               disconnect_req = (struct hbm_client_connect_request *)mei_msg;
-               mei_client_disconnect_request(dev, disconnect_req);
-               break;
-
-       case ME_STOP_REQ_CMD:
-       {
-               /* prepare stop request: sent in next interrupt event */
-
-               const size_t len = sizeof(struct hbm_host_stop_request);
-
-               mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
-               stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
-               memset(stop_req, 0, len);
-               stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
-               stop_req->reason = DRIVER_STOP_REQUEST;
-               break;
-       }
-       default:
-               BUG();
-               break;
-
-       }
-}
-
 
 /**
  * _mei_hb_read - processes read related operation.
@@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev,     s32 *slots,
 
        *slots -= mei_data2slots(sizeof(struct hbm_flow_control));
 
-       if (mei_send_flow_control(dev, cl)) {
+       if (mei_hbm_cl_flow_control_req(dev, cl)) {
                cl->status = -ENODEV;
                cb_pos->buf_idx = 0;
                list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
        }
 
        cl->state = MEI_FILE_CONNECTING;
-        *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
-       if (mei_connect(dev, cl)) {
+       *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
+       if (mei_hbm_cl_connect_req(dev, cl)) {
                cl->status = -ENODEV;
                cb_pos->buf_idx = 0;
                list_del(&cb_pos->list);
@@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
 static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
                        struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
 {
-       struct mei_msg_hdr *mei_hdr;
+       struct mei_msg_hdr mei_hdr;
        struct mei_cl *cl = cb->cl;
        size_t len = cb->request_buffer.size - cb->buf_idx;
        size_t msg_slots = mei_data2slots(len);
 
-       mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
-       mei_hdr->host_addr = cl->host_client_id;
-       mei_hdr->me_addr = cl->me_client_id;
-       mei_hdr->reserved = 0;
+       mei_hdr.host_addr = cl->host_client_id;
+       mei_hdr.me_addr = cl->me_client_id;
+       mei_hdr.reserved = 0;
 
        if (*slots >= msg_slots) {
-               mei_hdr->length = len;
-               mei_hdr->msg_complete = 1;
+               mei_hdr.length = len;
+               mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (*slots == dev->hbuf_depth) {
                msg_slots = *slots;
                len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
-               mei_hdr->length = len;
-               mei_hdr->msg_complete = 0;
+               mei_hdr.length = len;
+               mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
@@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
 
        dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
                        cb->request_buffer.size, cb->buf_idx);
-       dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
-                       mei_hdr->length, mei_hdr->msg_complete);
+       dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
 
        *slots -=  msg_slots;
-       if (mei_write_message(dev, mei_hdr,
-               cb->request_buffer.data + cb->buf_idx, len)) {
+       if (mei_write_message(dev, &mei_hdr,
+                       cb->request_buffer.data + cb->buf_idx)) {
                cl->status = -ENODEV;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -ENODEV;
        }
 
-       if (mei_flow_ctrl_reduce(dev, cl))
+       if (mei_cl_flow_ctrl_reduce(cl))
                return -ENODEV;
 
        cl->status = 0;
-       cb->buf_idx += mei_hdr->length;
-       if (mei_hdr->msg_complete)
+       cb->buf_idx += mei_hdr.length;
+       if (mei_hdr.msg_complete)
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
 
        return 0;
@@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
  * mei_irq_thread_read_handler - bottom half read routine after ISR to
  * handle the read processing.
  *
- * @cmpl_list: An instance of our list structure
  * @dev: the device structure
+ * @cmpl_list: An instance of our list structure
  * @slots: slots to read.
  *
  * returns 0 on success, <0 on failure.
  */
-static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
-               struct mei_device *dev,
-               s32 *slots)
+int mei_irq_read_handler(struct mei_device *dev,
+               struct mei_cl_cb *cmpl_list, s32 *slots)
 {
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl_pos = NULL;
@@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
        int ret = 0;
 
        if (!dev->rd_msg_hdr) {
-               dev->rd_msg_hdr = mei_mecbrw_read(dev);
+               dev->rd_msg_hdr = mei_read_hdr(dev);
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
                (*slots)--;
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
        }
        mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
-       dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
+       dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
 
        if (mei_hdr->reserved || !dev->rd_msg_hdr) {
                dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
        /* decide where to read the message too */
        if (!mei_hdr->host_addr) {
                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
-               mei_irq_thread_read_bus_message(dev, mei_hdr);
+               mei_hbm_dispatch(dev, mei_hdr);
                dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
        } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
                   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
                   (dev->iamthif_state == MEI_IAMTHIF_READING)) {
                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
-               dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
-                               mei_hdr->length);
+
+               dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
 
                ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
                if (ret)
                        goto end;
-
        } else {
                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
                ret = mei_irq_thread_read_client_message(cmpl_list,
@@ -869,15 +411,15 @@ end:
 
 
 /**
- * mei_irq_thread_write_handler - bottom half write routine after
- * ISR to handle the write processing.
+ * mei_irq_write_handler -  dispatch write requests
+ *  after irq received
  *
  * @dev: the device structure
  * @cmpl_list: An instance of our list structure
  *
  * returns 0 on success, <0 on failure.
  */
-static int mei_irq_thread_write_handler(struct mei_device *dev,
+int mei_irq_write_handler(struct mei_device *dev,
                                struct mei_cl_cb *cmpl_list)
 {
 
@@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
        s32 slots;
        int ret;
 
-       if (!mei_hbuf_is_empty(dev)) {
+       if (!mei_hbuf_is_ready(dev)) {
                dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
                return 0;
        }
@@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
 
        if (dev->wr_ext_msg.hdr.length) {
                mei_write_message(dev, &dev->wr_ext_msg.hdr,
-                       dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
+                               dev->wr_ext_msg.data);
                slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
                dev->wr_ext_msg.hdr.length = 0;
        }
        if (dev->dev_state == MEI_DEV_ENABLED) {
                if (dev->wd_pending &&
-                   mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+                   mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
                        if (mei_wd_send(dev))
                                dev_dbg(&dev->pdev->dev, "wd send failed.\n");
-                       else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
+                       else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
                                return -ENODEV;
 
                        dev->wd_pending = false;
@@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
                        break;
                case MEI_FOP_IOCTL:
                        /* connect message */
-                       if (mei_other_client_is_connecting(dev, cl))
+                       if (mei_cl_is_other_connecting(cl))
                                continue;
                        ret = _mei_irq_thread_ioctl(dev, &slots, pos,
                                                cl, cmpl_list);
@@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
                cl = pos->cl;
                if (cl == NULL)
                        continue;
-               if (mei_flow_ctrl_creds(dev, cl) <= 0) {
+               if (mei_cl_flow_ctrl_creds(cl) <= 0) {
                        dev_dbg(&dev->pdev->dev,
                                "No flow control credentials for client %d, not sending.\n",
                                cl->host_client_id);
@@ -1123,115 +665,3 @@ out:
        mutex_unlock(&dev->device_lock);
 }
 
-/**
- *  mei_interrupt_thread_handler - function called after ISR to handle the interrupt
- * processing.
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- *
- */
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
-{
-       struct mei_device *dev = (struct mei_device *) dev_id;
-       struct mei_cl_cb complete_list;
-       struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
-       struct mei_cl *cl;
-       s32 slots;
-       int rets;
-       bool  bus_message_received;
-
-
-       dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
-       /* initialize our complete list */
-       mutex_lock(&dev->device_lock);
-       mei_io_list_init(&complete_list);
-       dev->host_hw_state = mei_hcsr_read(dev);
-
-       /* Ack the interrupt here
-        * In case of MSI we don't go through the quick handler */
-       if (pci_dev_msi_enabled(dev->pdev))
-               mei_reg_write(dev, H_CSR, dev->host_hw_state);
-
-       dev->me_hw_state = mei_mecsr_read(dev);
-
-       /* check if ME wants a reset */
-       if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
-           dev->dev_state != MEI_DEV_RESETING &&
-           dev->dev_state != MEI_DEV_INITIALIZING) {
-               dev_dbg(&dev->pdev->dev, "FW not ready.\n");
-               mei_reset(dev, 1);
-               mutex_unlock(&dev->device_lock);
-               return IRQ_HANDLED;
-       }
-
-       /*  check if we need to start the dev */
-       if ((dev->host_hw_state & H_RDY) == 0) {
-               if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
-                       dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
-                       dev->host_hw_state |= (H_IE | H_IG | H_RDY);
-                       mei_hcsr_set(dev);
-                       dev->dev_state = MEI_DEV_INIT_CLIENTS;
-                       dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
-                       /* link is established
-                        * start sending messages.
-                        */
-                       mei_host_start_message(dev);
-                       mutex_unlock(&dev->device_lock);
-                       return IRQ_HANDLED;
-               } else {
-                       dev_dbg(&dev->pdev->dev, "FW not ready.\n");
-                       mutex_unlock(&dev->device_lock);
-                       return IRQ_HANDLED;
-               }
-       }
-       /* check slots available for reading */
-       slots = mei_count_full_read_slots(dev);
-       while (slots > 0) {
-               /* we have urgent data to send so break the read */
-               if (dev->wr_ext_msg.hdr.length)
-                       break;
-               dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
-               dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
-               rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
-               if (rets)
-                       goto end;
-       }
-       rets = mei_irq_thread_write_handler(dev, &complete_list);
-end:
-       dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
-       dev->host_hw_state = mei_hcsr_read(dev);
-       dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
-
-       bus_message_received = false;
-       if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
-               dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
-               bus_message_received = true;
-       }
-       mutex_unlock(&dev->device_lock);
-       if (bus_message_received) {
-               dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
-               wake_up_interruptible(&dev->wait_recvd_msg);
-               bus_message_received = false;
-       }
-       if (list_empty(&complete_list.list))
-               return IRQ_HANDLED;
-
-
-       list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
-               cl = cb_pos->cl;
-               list_del(&cb_pos->list);
-               if (cl) {
-                       if (cl != &dev->iamthif_cl) {
-                               dev_dbg(&dev->pdev->dev, "completing call back.\n");
-                               _mei_cmpl(cl, cb_pos);
-                               cb_pos = NULL;
-                       } else if (cl == &dev->iamthif_cl) {
-                               mei_amthif_complete(dev, cb_pos);
-                       }
-               }
-       }
-       return IRQ_HANDLED;
-}
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
deleted file mode 100644 (file)
index eb93a1b..0000000
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/aio.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-
-
-#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-/**
- * mei_io_cb_free - free mei_cb_private related memory
- *
- * @cb: mei callback struct
- */
-void mei_io_cb_free(struct mei_cl_cb *cb)
-{
-       if (cb == NULL)
-               return;
-
-       kfree(cb->request_buffer.data);
-       kfree(cb->response_buffer.data);
-       kfree(cb);
-}
-/**
- * mei_io_cb_init - allocate and initialize io callback
- *
- * @cl - mei client
- * @file: pointer to file structure
- *
- * returns mei_cl_cb pointer or NULL;
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
-{
-       struct mei_cl_cb *cb;
-
-       cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
-       if (!cb)
-               return NULL;
-
-       mei_io_list_init(cb);
-
-       cb->file_object = fp;
-       cb->cl = cl;
-       cb->buf_idx = 0;
-       return cb;
-}
-
-
-/**
- * mei_io_cb_alloc_req_buf - allocate request buffer
- *
- * @cb -  io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- *         -EINVAL if cb is NULL
- *         -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
-{
-       if (!cb)
-               return -EINVAL;
-
-       if (length == 0)
-               return 0;
-
-       cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
-       if (!cb->request_buffer.data)
-               return -ENOMEM;
-       cb->request_buffer.size = length;
-       return 0;
-}
-/**
- * mei_io_cb_alloc_req_buf - allocate respose buffer
- *
- * @cb -  io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- *         -EINVAL if cb is NULL
- *         -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
-{
-       if (!cb)
-               return -EINVAL;
-
-       if (length == 0)
-               return 0;
-
-       cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
-       if (!cb->response_buffer.data)
-               return -ENOMEM;
-       cb->response_buffer.size = length;
-       return 0;
-}
-
-
-/**
- * mei_me_cl_by_id return index to me_clients for client_id
- *
- * @dev: the device structure
- * @client_id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns index on success, -ENOENT on failure.
- */
-
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
-{
-       int i;
-       for (i = 0; i < dev->me_clients_num; i++)
-               if (dev->me_clients[i].client_id == client_id)
-                       break;
-       if (WARN_ON(dev->me_clients[i].client_id != client_id))
-               return -ENOENT;
-
-       if (i == dev->me_clients_num)
-               return -ENOENT;
-
-       return i;
-}
-
-/**
- * mei_ioctl_connect_client - the connect to fw client IOCTL function
- *
- * @dev: the device structure
- * @data: IOCTL connect data, input and output parameters
- * @file: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_ioctl_connect_client(struct file *file,
-                       struct mei_connect_client_data *data)
-{
-       struct mei_device *dev;
-       struct mei_cl_cb *cb;
-       struct mei_client *client;
-       struct mei_cl *cl;
-       long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
-       int i;
-       int err;
-       int rets;
-
-       cl = file->private_data;
-       if (WARN_ON(!cl || !cl->dev))
-               return -ENODEV;
-
-       dev = cl->dev;
-
-       dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
-
-       /* buffered ioctl cb */
-       cb = mei_io_cb_init(cl, file);
-       if (!cb) {
-               rets = -ENOMEM;
-               goto end;
-       }
-
-       cb->fop_type = MEI_FOP_IOCTL;
-
-       if (dev->dev_state != MEI_DEV_ENABLED) {
-               rets = -ENODEV;
-               goto end;
-       }
-       if (cl->state != MEI_FILE_INITIALIZING &&
-           cl->state != MEI_FILE_DISCONNECTED) {
-               rets = -EBUSY;
-               goto end;
-       }
-
-       /* find ME client we're trying to connect to */
-       i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
-       if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
-               cl->me_client_id = dev->me_clients[i].client_id;
-               cl->state = MEI_FILE_CONNECTING;
-       }
-
-       dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
-                       cl->me_client_id);
-       dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
-                       dev->me_clients[i].props.protocol_version);
-       dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
-                       dev->me_clients[i].props.max_msg_length);
-
-       /* if we're connecting to amthi client then we will use the
-        * existing connection
-        */
-       if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
-               dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
-               if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
-                       rets = -ENODEV;
-                       goto end;
-               }
-               clear_bit(cl->host_client_id, dev->host_clients_map);
-               mei_me_cl_unlink(dev, cl);
-
-               kfree(cl);
-               cl = NULL;
-               file->private_data = &dev->iamthif_cl;
-
-               client = &data->out_client_properties;
-               client->max_msg_length =
-                       dev->me_clients[i].props.max_msg_length;
-               client->protocol_version =
-                       dev->me_clients[i].props.protocol_version;
-               rets = dev->iamthif_cl.status;
-
-               goto end;
-       }
-
-       if (cl->state != MEI_FILE_CONNECTING) {
-               rets = -ENODEV;
-               goto end;
-       }
-
-
-       /* prepare the output buffer */
-       client = &data->out_client_properties;
-       client->max_msg_length = dev->me_clients[i].props.max_msg_length;
-       client->protocol_version = dev->me_clients[i].props.protocol_version;
-       dev_dbg(&dev->pdev->dev, "Can connect?\n");
-       if (dev->mei_host_buffer_is_empty
-           && !mei_other_client_is_connecting(dev, cl)) {
-               dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
-               dev->mei_host_buffer_is_empty = false;
-               if (mei_connect(dev, cl)) {
-                       dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
-                       rets = -ENODEV;
-                       goto end;
-               } else {
-                       dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
-                       cl->timer_count = MEI_CONNECT_TIMEOUT;
-                       list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
-               }
-
-
-       } else {
-               dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
-               dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
-               list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-       }
-       mutex_unlock(&dev->device_lock);
-       err = wait_event_timeout(dev->wait_recvd_msg,
-                       (MEI_FILE_CONNECTED == cl->state ||
-                        MEI_FILE_DISCONNECTED == cl->state), timeout);
-
-       mutex_lock(&dev->device_lock);
-       if (MEI_FILE_CONNECTED == cl->state) {
-               dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
-               rets = cl->status;
-               goto end;
-       } else {
-               dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
-                   cl->state);
-               if (!err) {
-                       dev_dbg(&dev->pdev->dev,
-                               "wait_event_interruptible_timeout failed on client"
-                               " connect message fw response message.\n");
-               }
-               rets = -EFAULT;
-
-               mei_io_list_flush(&dev->ctrl_rd_list, cl);
-               mei_io_list_flush(&dev->ctrl_wr_list, cl);
-               goto end;
-       }
-       rets = 0;
-end:
-       dev_dbg(&dev->pdev->dev, "free connect cb memory.");
-       mei_io_cb_free(cb);
-       return rets;
-}
-
-/**
- * mei_start_read - the start read client message function.
- *
- * @dev: the device structure
- * @if_num:  minor number
- * @cl: private data of the file object
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
-{
-       struct mei_cl_cb *cb;
-       int rets;
-       int i;
-
-       if (cl->state != MEI_FILE_CONNECTED)
-               return -ENODEV;
-
-       if (dev->dev_state != MEI_DEV_ENABLED)
-               return -ENODEV;
-
-       if (cl->read_pending || cl->read_cb) {
-               dev_dbg(&dev->pdev->dev, "read is pending.\n");
-               return -EBUSY;
-       }
-       i = mei_me_cl_by_id(dev, cl->me_client_id);
-       if (i < 0) {
-               dev_err(&dev->pdev->dev, "no such me client %d\n",
-                       cl->me_client_id);
-               return  -ENODEV;
-       }
-
-       cb = mei_io_cb_init(cl, NULL);
-       if (!cb)
-               return -ENOMEM;
-
-       rets = mei_io_cb_alloc_resp_buf(cb,
-                       dev->me_clients[i].props.max_msg_length);
-       if (rets)
-               goto err;
-
-       cb->fop_type = MEI_FOP_READ;
-       cl->read_cb = cb;
-       if (dev->mei_host_buffer_is_empty) {
-               dev->mei_host_buffer_is_empty = false;
-               if (mei_send_flow_control(dev, cl)) {
-                       rets = -ENODEV;
-                       goto err;
-               }
-               list_add_tail(&cb->list, &dev->read_list.list);
-       } else {
-               list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-       }
-       return rets;
-err:
-       mei_io_cb_free(cb);
-       return rets;
-}
-
index 43fb52f..903f809 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
 
-#include "mei_dev.h"
 #include <linux/mei.h>
-#include "interface.h"
-
-/* AMT device is a singleton on the platform */
-static struct pci_dev *mei_pdev;
-
-/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
-
-       /* required last entry */
-       {0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
 
-static DEFINE_MUTEX(mei_mutex);
-
-
-/**
- * find_read_list_entry - find read list entry
- *
- * @dev: device structure
- * @file: pointer to file structure
- *
- * returns cb on success, NULL on error
- */
-static struct mei_cl_cb *find_read_list_entry(
-               struct mei_device *dev,
-               struct mei_cl *cl)
-{
-       struct mei_cl_cb *pos = NULL;
-       struct mei_cl_cb *next = NULL;
-
-       dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
-       list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
-               if (mei_cl_cmp_id(cl, pos->cl))
-                       return pos;
-       return NULL;
-}
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
 
 /**
  * mei_open - the open function
@@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry(
  */
 static int mei_open(struct inode *inode, struct file *file)
 {
+       struct miscdevice *misc = file->private_data;
+       struct pci_dev *pdev;
        struct mei_cl *cl;
        struct mei_device *dev;
-       unsigned long cl_id;
+
        int err;
 
        err = -ENODEV;
-       if (!mei_pdev)
+       if (!misc->parent)
                goto out;
 
-       dev = pci_get_drvdata(mei_pdev);
+       pdev = container_of(misc->parent, struct pci_dev, dev);
+
+       dev = pci_get_drvdata(pdev);
        if (!dev)
                goto out;
 
@@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file)
                goto out_unlock;
        }
 
-       cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
-       if (cl_id >= MEI_CLIENTS_MAX) {
-               dev_err(&dev->pdev->dev, "client_id exceded %d",
-                               MEI_CLIENTS_MAX) ;
+       err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
+       if (err)
                goto out_unlock;
-       }
-
-       cl->host_client_id  = cl_id;
-
-       dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
-
-       dev->open_handle_count++;
-
-       list_add_tail(&cl->link, &dev->file_list);
-
-       set_bit(cl->host_client_id, dev->host_clients_map);
-       cl->state = MEI_FILE_INITIALIZING;
-       cl->sm_state = 0;
 
        file->private_data = cl;
        mutex_unlock(&dev->device_lock);
@@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file)
                    "ME client = %d\n",
                    cl->host_client_id,
                    cl->me_client_id);
-               rets = mei_disconnect_host_client(dev, cl);
+               rets = mei_cl_disconnect(cl);
        }
        mei_cl_flush_queues(cl);
        dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file)
                clear_bit(cl->host_client_id, dev->host_clients_map);
                dev->open_handle_count--;
        }
-       mei_me_cl_unlink(dev, cl);
+       mei_cl_unlink(cl);
+
 
        /* free read cb */
        cb = NULL;
        if (cl->read_cb) {
-               cb = find_read_list_entry(dev, cl);
+               cb = mei_cl_find_read_cb(cl);
                /* Remove entry from read list */
                if (cb)
                        list_del(&cb->list);
@@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
                goto out;
        }
 
-       err = mei_start_read(dev, cl);
+       err = mei_cl_read_start(cl);
        if (err && err != -EBUSY) {
                dev_dbg(&dev->pdev->dev,
                        "mei start read failure with status = %d\n", err);
@@ -393,14 +315,13 @@ copy_buffer:
                goto out;
 
 free:
-       cb_pos = find_read_list_entry(dev, cl);
+       cb_pos = mei_cl_find_read_cb(cl);
        /* Remove entry from read list */
        if (cb_pos)
                list_del(&cb_pos->list);
        mei_io_cb_free(cb);
        cl->reading_state = MEI_IDLE;
        cl->read_cb = NULL;
-       cl->read_pending = 0;
 out:
        dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
        mutex_unlock(&dev->device_lock);
@@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
        /* free entry used in read */
        if (cl->reading_state == MEI_READ_COMPLETE) {
                *offset = 0;
-               write_cb = find_read_list_entry(dev, cl);
+               write_cb = mei_cl_find_read_cb(cl);
                if (write_cb) {
                        list_del(&write_cb->list);
                        mei_io_cb_free(write_cb);
                        write_cb = NULL;
                        cl->reading_state = MEI_IDLE;
                        cl->read_cb = NULL;
-                       cl->read_pending = 0;
                }
-       } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
+       } else if (cl->reading_state == MEI_IDLE)
                *offset = 0;
 
 
@@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
 
                if (rets) {
                        dev_err(&dev->pdev->dev,
-                               "amthi write failed with status = %d\n", rets);
+                               "amthif write failed with status = %d\n", rets);
                        goto err;
                }
                mutex_unlock(&dev->device_lock);
@@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
 
        dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
            cl->host_client_id, cl->me_client_id);
-       rets = mei_flow_ctrl_creds(dev, cl);
+       rets = mei_cl_flow_ctrl_creds(cl);
        if (rets < 0)
                goto err;
 
-       if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
+       if (rets == 0 || !dev->hbuf_is_ready) {
                write_cb->buf_idx = 0;
                mei_hdr.msg_complete = 0;
                cl->writing_state = MEI_WRITING;
                goto out;
        }
 
-       dev->mei_host_buffer_is_empty = false;
-       if (length >  mei_hbuf_max_data(dev)) {
-               mei_hdr.length = mei_hbuf_max_data(dev);
+       dev->hbuf_is_ready = false;
+       if (length >  mei_hbuf_max_len(dev)) {
+               mei_hdr.length = mei_hbuf_max_len(dev);
                mei_hdr.msg_complete = 0;
        } else {
                mei_hdr.length = length;
@@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;
-       dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
-           *((u32 *) &mei_hdr));
-       if (mei_write_message(dev, &mei_hdr,
-               write_cb->request_buffer.data, mei_hdr.length)) {
+
+       dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
+               MEI_HDR_PRM(&mei_hdr));
+       if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
                rets = -ENODEV;
                goto err;
        }
@@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
 
 out:
        if (mei_hdr.msg_complete) {
-               if (mei_flow_ctrl_reduce(dev, cl)) {
+               if (mei_cl_flow_ctrl_reduce(cl)) {
                        rets = -ENODEV;
                        goto err;
                }
@@ -582,6 +502,103 @@ err:
        return rets;
 }
 
+/**
+ * mei_ioctl_connect_client - the connect to fw client IOCTL function
+ *
+ * @dev: the device structure
+ * @data: IOCTL connect data, input and output parameters
+ * @file: private data of the file object
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_ioctl_connect_client(struct file *file,
+                       struct mei_connect_client_data *data)
+{
+       struct mei_device *dev;
+       struct mei_client *client;
+       struct mei_cl *cl;
+       int i;
+       int rets;
+
+       cl = file->private_data;
+       if (WARN_ON(!cl || !cl->dev))
+               return -ENODEV;
+
+       dev = cl->dev;
+
+       if (dev->dev_state != MEI_DEV_ENABLED) {
+               rets = -ENODEV;
+               goto end;
+       }
+
+       if (cl->state != MEI_FILE_INITIALIZING &&
+           cl->state != MEI_FILE_DISCONNECTED) {
+               rets = -EBUSY;
+               goto end;
+       }
+
+       /* find ME client we're trying to connect to */
+       i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
+       if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
+               cl->me_client_id = dev->me_clients[i].client_id;
+               cl->state = MEI_FILE_CONNECTING;
+       }
+
+       dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
+                       cl->me_client_id);
+       dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
+                       dev->me_clients[i].props.protocol_version);
+       dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
+                       dev->me_clients[i].props.max_msg_length);
+
+       /* if we're connecting to amthif client then we will use the
+        * existing connection
+        */
+       if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
+               dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
+               if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
+                       rets = -ENODEV;
+                       goto end;
+               }
+               clear_bit(cl->host_client_id, dev->host_clients_map);
+               mei_cl_unlink(cl);
+
+               kfree(cl);
+               cl = NULL;
+               file->private_data = &dev->iamthif_cl;
+
+               client = &data->out_client_properties;
+               client->max_msg_length =
+                       dev->me_clients[i].props.max_msg_length;
+               client->protocol_version =
+                       dev->me_clients[i].props.protocol_version;
+               rets = dev->iamthif_cl.status;
+
+               goto end;
+       }
+
+       if (cl->state != MEI_FILE_CONNECTING) {
+               rets = -ENODEV;
+               goto end;
+       }
+
+
+       /* prepare the output buffer */
+       client = &data->out_client_properties;
+       client->max_msg_length = dev->me_clients[i].props.max_msg_length;
+       client->protocol_version = dev->me_clients[i].props.protocol_version;
+       dev_dbg(&dev->pdev->dev, "Can connect?\n");
+
+
+       rets = mei_cl_connect(cl, file);
+
+end:
+       dev_dbg(&dev->pdev->dev, "free connect cb memory.");
+       return rets;
+}
+
 
 /**
  * mei_ioctl - the IOCTL function
@@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
                rets = -EFAULT;
                goto out;
        }
+
        rets = mei_ioctl_connect_client(file, connect_data);
 
        /* if all is ok, copying the data back to user. */
@@ -726,7 +744,6 @@ static const struct file_operations mei_fops = {
        .llseek = no_llseek
 };
 
-
 /*
  * Misc Device Struct
  */
@@ -736,300 +753,17 @@ static struct miscdevice  mei_misc_device = {
                .minor = MISC_DYNAMIC_MINOR,
 };
 
-/**
- * mei_quirk_probe - probe for devices that doesn't valid ME interface
- * @pdev: PCI device structure
- * @ent: entry into pci_device_table
- *
- * returns true if ME Interface is valid, false otherwise
- */
-static bool mei_quirk_probe(struct pci_dev *pdev,
-                               const struct pci_device_id *ent)
+int mei_register(struct device *dev)
 {
-       u32 reg;
-       if (ent->device == MEI_DEV_ID_PBG_1) {
-               pci_read_config_dword(pdev, 0x48, &reg);
-               /* make sure that bit 9 is up and bit 10 is down */
-               if ((reg & 0x600) == 0x200) {
-                       dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
-                       return false;
-               }
-       }
-       return true;
-}
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_probe(struct pci_dev *pdev,
-                               const struct pci_device_id *ent)
-{
-       struct mei_device *dev;
-       int err;
-
-       mutex_lock(&mei_mutex);
-
-       if (!mei_quirk_probe(pdev, ent)) {
-               err = -ENODEV;
-               goto end;
-       }
-
-       if (mei_pdev) {
-               err = -EEXIST;
-               goto end;
-       }
-       /* enable pci dev */
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "failed to enable pci device.\n");
-               goto end;
-       }
-       /* set PCI host mastering  */
-       pci_set_master(pdev);
-       /* pci request regions for mei driver */
-       err = pci_request_regions(pdev, KBUILD_MODNAME);
-       if (err) {
-               dev_err(&pdev->dev, "failed to get pci regions.\n");
-               goto disable_device;
-       }
-       /* allocates and initializes the mei dev structure */
-       dev = mei_device_init(pdev);
-       if (!dev) {
-               err = -ENOMEM;
-               goto release_regions;
-       }
-       /* mapping  IO device memory */
-       dev->mem_addr = pci_iomap(pdev, 0, 0);
-       if (!dev->mem_addr) {
-               dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
-               err = -ENOMEM;
-               goto free_device;
-       }
-       pci_enable_msi(pdev);
-
-        /* request and enable interrupt */
-       if (pci_dev_msi_enabled(pdev))
-               err = request_threaded_irq(pdev->irq,
-                       NULL,
-                       mei_interrupt_thread_handler,
-                       IRQF_ONESHOT, KBUILD_MODNAME, dev);
-       else
-               err = request_threaded_irq(pdev->irq,
-                       mei_interrupt_quick_handler,
-                       mei_interrupt_thread_handler,
-                       IRQF_SHARED, KBUILD_MODNAME, dev);
-
-       if (err) {
-               dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
-                      pdev->irq);
-               goto disable_msi;
-       }
-       INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
-       INIT_WORK(&dev->init_work, mei_host_client_init);
-
-       if (mei_hw_init(dev)) {
-               dev_err(&pdev->dev, "init hw failure.\n");
-               err = -ENODEV;
-               goto release_irq;
-       }
-
-       err = misc_register(&mei_misc_device);
-       if (err)
-               goto release_irq;
-
-       mei_pdev = pdev;
-       pci_set_drvdata(pdev, dev);
-
-
-       schedule_delayed_work(&dev->timer_work, HZ);
-
-       mutex_unlock(&mei_mutex);
-
-       pr_debug("initialization successful.\n");
-
-       return 0;
-
-release_irq:
-       /* disable interrupts */
-       dev->host_hw_state = mei_hcsr_read(dev);
-       mei_disable_interrupts(dev);
-       flush_scheduled_work();
-       free_irq(pdev->irq, dev);
-disable_msi:
-       pci_disable_msi(pdev);
-       pci_iounmap(pdev, dev->mem_addr);
-free_device:
-       kfree(dev);
-release_regions:
-       pci_release_regions(pdev);
-disable_device:
-       pci_disable_device(pdev);
-end:
-       mutex_unlock(&mei_mutex);
-       dev_err(&pdev->dev, "initialization failed.\n");
-       return err;
+       mei_misc_device.parent = dev;
+       return misc_register(&mei_misc_device);
 }
 
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void mei_remove(struct pci_dev *pdev)
+void mei_deregister(void)
 {
-       struct mei_device *dev;
-
-       if (mei_pdev != pdev)
-               return;
-
-       dev = pci_get_drvdata(pdev);
-       if (!dev)
-               return;
-
-       mutex_lock(&dev->device_lock);
-
-       cancel_delayed_work(&dev->timer_work);
-
-       mei_wd_stop(dev);
-
-       mei_pdev = NULL;
-
-       if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
-               dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
-               mei_disconnect_host_client(dev, &dev->iamthif_cl);
-       }
-       if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
-               dev->wd_cl.state = MEI_FILE_DISCONNECTING;
-               mei_disconnect_host_client(dev, &dev->wd_cl);
-       }
-
-       /* Unregistering watchdog device */
-       mei_watchdog_unregister(dev);
-
-       /* remove entry if already in list */
-       dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
-       mei_me_cl_unlink(dev, &dev->wd_cl);
-       mei_me_cl_unlink(dev, &dev->iamthif_cl);
-
-       dev->iamthif_current_cb = NULL;
-       dev->me_clients_num = 0;
-
-       mutex_unlock(&dev->device_lock);
-
-       flush_scheduled_work();
-
-       /* disable interrupts */
-       mei_disable_interrupts(dev);
-
-       free_irq(pdev->irq, dev);
-       pci_disable_msi(pdev);
-       pci_set_drvdata(pdev, NULL);
-
-       if (dev->mem_addr)
-               pci_iounmap(pdev, dev->mem_addr);
-
-       kfree(dev);
-
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-
        misc_deregister(&mei_misc_device);
-}
-#ifdef CONFIG_PM
-static int mei_pci_suspend(struct device *device)
-{
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct mei_device *dev = pci_get_drvdata(pdev);
-       int err;
-
-       if (!dev)
-               return -ENODEV;
-       mutex_lock(&dev->device_lock);
-
-       cancel_delayed_work(&dev->timer_work);
-
-       /* Stop watchdog if exists */
-       err = mei_wd_stop(dev);
-       /* Set new mei state */
-       if (dev->dev_state == MEI_DEV_ENABLED ||
-           dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-               dev->dev_state = MEI_DEV_POWER_DOWN;
-               mei_reset(dev, 0);
-       }
-       mutex_unlock(&dev->device_lock);
-
-       free_irq(pdev->irq, dev);
-       pci_disable_msi(pdev);
-
-       return err;
+       mei_misc_device.parent = NULL;
 }
 
-static int mei_pci_resume(struct device *device)
-{
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct mei_device *dev;
-       int err;
-
-       dev = pci_get_drvdata(pdev);
-       if (!dev)
-               return -ENODEV;
-
-       pci_enable_msi(pdev);
-
-       /* request and enable interrupt */
-       if (pci_dev_msi_enabled(pdev))
-               err = request_threaded_irq(pdev->irq,
-                       NULL,
-                       mei_interrupt_thread_handler,
-                       IRQF_ONESHOT, KBUILD_MODNAME, dev);
-       else
-               err = request_threaded_irq(pdev->irq,
-                       mei_interrupt_quick_handler,
-                       mei_interrupt_thread_handler,
-                       IRQF_SHARED, KBUILD_MODNAME, dev);
-
-       if (err) {
-               dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
-                               pdev->irq);
-               return err;
-       }
-
-       mutex_lock(&dev->device_lock);
-       dev->dev_state = MEI_DEV_POWER_UP;
-       mei_reset(dev, 1);
-       mutex_unlock(&dev->device_lock);
-
-       /* Start timer if stopped in suspend */
-       schedule_delayed_work(&dev->timer_work, HZ);
-
-       return err;
-}
-static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
-#define MEI_PM_OPS     (&mei_pm_ops)
-#else
-#define MEI_PM_OPS     NULL
-#endif /* CONFIG_PM */
-/*
- *  PCI driver structure
- */
-static struct pci_driver mei_driver = {
-       .name = KBUILD_MODNAME,
-       .id_table = mei_pci_tbl,
-       .probe = mei_probe,
-       .remove = mei_remove,
-       .shutdown = mei_remove,
-       .driver.pm = MEI_PM_OPS,
-};
-
-module_pci_driver(mei_driver);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
 MODULE_LICENSE("GPL v2");
+
index 25da045..cb80166 100644 (file)
@@ -21,7 +21,9 @@
 #include <linux/watchdog.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
+
 #include "hw.h"
+#include "hw-me-regs.h"
 
 /*
  * watch dog definition
@@ -44,7 +46,7 @@
 /*
  * AMTHI Client UUID
  */
-extern const uuid_le mei_amthi_guid;
+extern const uuid_le mei_amthif_guid;
 
 /*
  * Watchdog Client UUID
@@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4];
  * Number of File descriptors/handles
  * that can be opened to the driver.
  *
- * Limit to 253: 256 Total Clients
+ * Limit to 255: 256 Total Clients
  * minus internal client for MEI Bus Messags
- * minus internal client for AMTHI
- * minus internal client for Watchdog
  */
-#define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3)
+#define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
+
+/*
+ * Internal Clients Number
+ */
+#define MEI_HOST_CLIENT_ID_ANY        (-1)
+#define MEI_HBM_HOST_CLIENT_ID         0 /* not used, just for documentation */
+#define MEI_WD_HOST_CLIENT_ID          1
+#define MEI_IAMTHIF_HOST_CLIENT_ID     2
 
 
 /* File state */
@@ -150,6 +158,19 @@ struct mei_message_data {
        unsigned char *data;
 };
 
+/**
+ * struct mei_me_client - representation of me (fw) client
+ *
+ * @props  - client properties
+ * @client_id - me client id
+ * @mei_flow_ctrl_creds - flow control credits
+ */
+struct mei_me_client {
+       struct mei_client_properties props;
+       u8 client_id;
+       u8 mei_flow_ctrl_creds;
+};
+
 
 struct mei_cl;
 
@@ -178,7 +199,6 @@ struct mei_cl {
        wait_queue_head_t tx_wait;
        wait_queue_head_t rx_wait;
        wait_queue_head_t wait;
-       int read_pending;
        int status;
        /* ID of client connected */
        u8 host_client_id;
@@ -191,10 +211,67 @@ struct mei_cl {
        struct mei_cl_cb *read_cb;
 };
 
+/** struct mei_hw_ops
+ *
+ * @host_set_ready   - notify FW that host side is ready
+ * @host_is_ready    - query for host readiness
+
+ * @hw_is_ready      - query if hw is ready
+ * @hw_reset         - reset hw
+ * @hw_config        - configure hw
+
+ * @intr_clear       - clear pending interrupts
+ * @intr_enable      - enable interrupts
+ * @intr_disable     - disable interrupts
+
+ * @hbuf_free_slots  - query for write buffer empty slots
+ * @hbuf_is_ready    - query if write buffer is empty
+ * @hbuf_max_len     - query for write buffer max len
+
+ * @write            - write a message to FW
+
+ * @rdbuf_full_slots - query how many slots are filled
+
+ * @read_hdr         - get first 4 bytes (header)
+ * @read             - read a buffer from the FW
+ */
+struct mei_hw_ops {
+
+       void (*host_set_ready) (struct mei_device *dev);
+       bool (*host_is_ready) (struct mei_device *dev);
+
+       bool (*hw_is_ready) (struct mei_device *dev);
+       void (*hw_reset) (struct mei_device *dev, bool enable);
+       void (*hw_config) (struct mei_device *dev);
+
+       void (*intr_clear) (struct mei_device *dev);
+       void (*intr_enable) (struct mei_device *dev);
+       void (*intr_disable) (struct mei_device *dev);
+
+       int (*hbuf_free_slots) (struct mei_device *dev);
+       bool (*hbuf_is_ready) (struct mei_device *dev);
+       size_t (*hbuf_max_len) (const struct mei_device *dev);
+
+       int (*write)(struct mei_device *dev,
+                    struct mei_msg_hdr *hdr,
+                    unsigned char *buf);
+
+       int (*rdbuf_full_slots)(struct mei_device *dev);
+
+       u32 (*read_hdr)(const struct mei_device *dev);
+       int (*read) (struct mei_device *dev,
+                    unsigned char *buf, unsigned long len);
+};
+
 /**
  * struct mei_device -  MEI private device struct
- * @hbuf_depth - depth of host(write) buffer
- * @wr_ext_msg - buffer for hbm control responses (set in read cycle)
+
+ * @mem_addr - mem mapped base register address
+
+ * @hbuf_depth - depth of hardware host/write buffer is slots
+ * @hbuf_is_ready - query if the host host/write buffer is ready
+ * @wr_msg - the buffer for hbm control messages
+ * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
  */
 struct mei_device {
        struct pci_dev *pdev;   /* pointer to pci device struct */
@@ -213,24 +290,14 @@ struct mei_device {
         */
        struct list_head file_list;
        long open_handle_count;
-       /*
-        * memory of device
-        */
-       unsigned int mem_base;
-       unsigned int mem_length;
-       void __iomem *mem_addr;
+
        /*
         * lock for the device
         */
        struct mutex device_lock; /* device lock */
        struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
        bool recvd_msg;
-       /*
-        * hw states of host and fw(ME)
-        */
-       u32 host_hw_state;
-       u32 me_hw_state;
-       u8  hbuf_depth;
+
        /*
         * waiting queue for receive message from FW
         */
@@ -243,11 +310,20 @@ struct mei_device {
        enum mei_dev_state dev_state;
        enum mei_init_clients_states init_clients_state;
        u16 init_clients_timer;
-       bool need_reset;
 
        unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];  /* control messages */
        u32 rd_msg_hdr;
-       u32 wr_msg_buf[128];    /* used for control messages */
+
+       /* write buffer */
+       u8 hbuf_depth;
+       bool hbuf_is_ready;
+
+       /* used for control messages */
+       struct {
+               struct mei_msg_hdr hdr;
+               unsigned char data[128];
+       } wr_msg;
+
        struct {
                struct mei_msg_hdr hdr;
                unsigned char data[4];  /* All HBM messages are 4 bytes */
@@ -261,7 +337,6 @@ struct mei_device {
        u8 me_clients_num;
        u8 me_client_presentation_num;
        u8 me_client_index;
-       bool mei_host_buffer_is_empty;
 
        struct mei_cl wd_cl;
        enum mei_wd_states wd_state;
@@ -289,6 +364,9 @@ struct mei_device {
        bool iamthif_canceled;
 
        struct work_struct init_work;
+
+       const struct mei_hw_ops *ops;
+       char hw[0] __aligned(sizeof(void *));
 };
 
 static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
 /*
  * mei init function prototypes
  */
-struct mei_device *mei_device_init(struct pci_dev *pdev);
+void mei_device_init(struct mei_device *dev);
 void mei_reset(struct mei_device *dev, int interrupts);
 int mei_hw_init(struct mei_device *dev);
-int mei_task_initialize_clients(void *data);
-int mei_initialize_clients(struct mei_device *dev);
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_allocate_me_clients_storage(struct mei_device *dev);
-
-
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
-                       const uuid_le *cguid, u8 host_client_id);
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
-
-/*
- * MEI IO Functions
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
-void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
-
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
-       INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
-/*
- * MEI ME Client Functions
- */
-
-struct mei_cl *mei_cl_allocate(struct mei_device *dev);
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_flush_queues(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true  - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
-                               const struct mei_cl *cl2)
-{
-       return cl1 && cl2 &&
-               (cl1->host_client_id == cl2->host_client_id) &&
-               (cl1->me_client_id == cl2->me_client_id);
-}
-
-
-
-/*
- * MEI Host Client Functions
- */
-void mei_host_start_message(struct mei_device *dev);
-void mei_host_enum_clients_message(struct mei_device *dev);
-int mei_host_client_enumerate(struct mei_device *dev);
-void mei_host_client_init(struct work_struct *work);
 
 /*
  *  MEI interrupt functions prototype
  */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
-void mei_timer(struct work_struct *work);
 
-/*
- *  MEI input output function prototype
- */
-int mei_ioctl_connect_client(struct file *file,
-                       struct mei_connect_client_data *data);
+void mei_timer(struct work_struct *work);
+int mei_irq_read_handler(struct mei_device *dev,
+               struct mei_cl_cb *cmpl_list, s32 *slots);
 
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
+int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
 
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
 
 /*
  * AMTHIF - AMT Host Interface Functions
  */
 void mei_amthif_reset_params(struct mei_device *dev);
 
-void mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev);
 
 int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
 
@@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
 void mei_amthif_run_next_cmd(struct mei_device *dev);
 
 
-int mei_amthif_read_message(struct mei_cl_cb *complete_list,
-               struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
-
 int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
                        struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
 
@@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
                struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
 int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
 
+
+int mei_wd_send(struct mei_device *dev);
+int mei_wd_stop(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev);
 /*
- * Register Access Function
+ * mei_watchdog_register  - Registering watchdog interface
+ *   once we got connection to the WD Client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister  - Unregistering watchdog interface
+ * @dev - mei device
  */
+void mei_watchdog_unregister(struct mei_device *dev);
 
-/**
- * mei_reg_read - Reads 32bit data from the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to read the data
- *
- * returns register value (u32)
+/*
+ * Register Access Function
  */
-static inline u32 mei_reg_read(const struct mei_device *dev,
-                              unsigned long offset)
+
+static inline void mei_hw_config(struct mei_device *dev)
+{
+       dev->ops->hw_config(dev);
+}
+static inline void mei_hw_reset(struct mei_device *dev, bool enable)
 {
-       return ioread32(dev->mem_addr + offset);
+       dev->ops->hw_reset(dev, enable);
 }
 
-/**
- * mei_reg_write - Writes 32bit data to the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to write the data
- * @value: register value to write (u32)
- */
-static inline void mei_reg_write(const struct mei_device *dev,
-                                unsigned long offset, u32 value)
+static inline void mei_clear_interrupts(struct mei_device *dev)
 {
-       iowrite32(value, dev->mem_addr + offset);
+       dev->ops->intr_clear(dev);
 }
 
-/**
- * mei_hcsr_read - Reads 32bit data from the host CSR
- *
- * @dev: the device structure
- *
- * returns the byte read.
- */
-static inline u32 mei_hcsr_read(const struct mei_device *dev)
+static inline void mei_enable_interrupts(struct mei_device *dev)
 {
-       return mei_reg_read(dev, H_CSR);
+       dev->ops->intr_enable(dev);
 }
 
-/**
- * mei_mecsr_read - Reads 32bit data from the ME CSR
- *
- * @dev: the device structure
- *
- * returns ME_CSR_HA register value (u32)
- */
-static inline u32 mei_mecsr_read(const struct mei_device *dev)
+static inline void mei_disable_interrupts(struct mei_device *dev)
 {
-       return mei_reg_read(dev, ME_CSR_HA);
+       dev->ops->intr_disable(dev);
 }
 
-/**
- * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
- *
- * @dev: the device structure
- *
- * returns ME_CB_RW register value (u32)
- */
-static inline u32 mei_mecbrw_read(const struct mei_device *dev)
+static inline void mei_host_set_ready(struct mei_device *dev)
 {
-       return mei_reg_read(dev, ME_CB_RW);
+       dev->ops->host_set_ready(dev);
+}
+static inline bool mei_host_is_ready(struct mei_device *dev)
+{
+       return dev->ops->host_is_ready(dev);
+}
+static inline bool mei_hw_is_ready(struct mei_device *dev)
+{
+       return dev->ops->hw_is_ready(dev);
 }
 
+static inline bool mei_hbuf_is_ready(struct mei_device *dev)
+{
+       return dev->ops->hbuf_is_ready(dev);
+}
 
-/*
- * mei interface function prototypes
- */
-void mei_hcsr_set(struct mei_device *dev);
-void mei_csr_clear_his(struct mei_device *dev);
+static inline int mei_hbuf_empty_slots(struct mei_device *dev)
+{
+       return dev->ops->hbuf_free_slots(dev);
+}
+
+static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+{
+       return dev->ops->hbuf_max_len(dev);
+}
 
-void mei_enable_interrupts(struct mei_device *dev);
-void mei_disable_interrupts(struct mei_device *dev);
+static inline int mei_write_message(struct mei_device *dev,
+                       struct mei_msg_hdr *hdr,
+                       unsigned char *buf)
+{
+       return dev->ops->write(dev, hdr, buf);
+}
 
-static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
+static inline u32 mei_read_hdr(const struct mei_device *dev)
 {
-       struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
-       hdr->host_addr = 0;
-       hdr->me_addr = 0;
-       hdr->length = length;
-       hdr->msg_complete = 1;
-       hdr->reserved = 0;
-       return hdr;
+       return dev->ops->read_hdr(dev);
 }
 
+static inline void mei_read_slots(struct mei_device *dev,
+                    unsigned char *buf, unsigned long len)
+{
+       dev->ops->read(dev, buf, len);
+}
+
+static inline int mei_count_full_read_slots(struct mei_device *dev)
+{
+       return dev->ops->rdbuf_full_slots(dev);
+}
+
+int mei_register(struct device *dev);
+void mei_deregister(void);
+
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_PRM(hdr)                  \
+       (hdr)->host_addr, (hdr)->me_addr, \
+       (hdr)->length, (hdr)->msg_complete
+
 #endif
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
new file mode 100644 (file)
index 0000000..b40ec06
--- /dev/null
@@ -0,0 +1,396 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/compat.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
+
+/* AMT device is a singleton on the platform */
+static struct pci_dev *mei_pdev;
+
+/* mei_pci_tbl - PCI Device ID Table */
+static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
+
+       /* required last entry */
+       {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
+
+static DEFINE_MUTEX(mei_mutex);
+
+/**
+ * mei_quirk_probe - probe for devices that doesn't valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool mei_quirk_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *ent)
+{
+       u32 reg;
+       if (ent->device == MEI_DEV_ID_PBG_1) {
+               pci_read_config_dword(pdev, 0x48, &reg);
+               /* make sure that bit 9 is up and bit 10 is down */
+               if ((reg & 0x600) == 0x200) {
+                       dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+                       return false;
+               }
+       }
+       return true;
+}
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in kcs_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct mei_device *dev;
+       struct mei_me_hw *hw;
+       int err;
+
+       mutex_lock(&mei_mutex);
+
+       if (!mei_quirk_probe(pdev, ent)) {
+               err = -ENODEV;
+               goto end;
+       }
+
+       if (mei_pdev) {
+               err = -EEXIST;
+               goto end;
+       }
+       /* enable pci dev */
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable pci device.\n");
+               goto end;
+       }
+       /* set PCI host mastering  */
+       pci_set_master(pdev);
+       /* pci request regions for mei driver */
+       err = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (err) {
+               dev_err(&pdev->dev, "failed to get pci regions.\n");
+               goto disable_device;
+       }
+       /* allocates and initializes the mei dev structure */
+       dev = mei_me_dev_init(pdev);
+       if (!dev) {
+               err = -ENOMEM;
+               goto release_regions;
+       }
+       hw = to_me_hw(dev);
+       /* mapping  IO device memory */
+       hw->mem_addr = pci_iomap(pdev, 0, 0);
+       if (!hw->mem_addr) {
+               dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+               err = -ENOMEM;
+               goto free_device;
+       }
+       pci_enable_msi(pdev);
+
+        /* request and enable interrupt */
+       if (pci_dev_msi_enabled(pdev))
+               err = request_threaded_irq(pdev->irq,
+                       NULL,
+                       mei_me_irq_thread_handler,
+                       IRQF_ONESHOT, KBUILD_MODNAME, dev);
+       else
+               err = request_threaded_irq(pdev->irq,
+                       mei_me_irq_quick_handler,
+                       mei_me_irq_thread_handler,
+                       IRQF_SHARED, KBUILD_MODNAME, dev);
+
+       if (err) {
+               dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
+                      pdev->irq);
+               goto disable_msi;
+       }
+
+       if (mei_hw_init(dev)) {
+               dev_err(&pdev->dev, "init hw failure.\n");
+               err = -ENODEV;
+               goto release_irq;
+       }
+
+       err = mei_register(&pdev->dev);
+       if (err)
+               goto release_irq;
+
+       mei_pdev = pdev;
+       pci_set_drvdata(pdev, dev);
+
+
+       schedule_delayed_work(&dev->timer_work, HZ);
+
+       mutex_unlock(&mei_mutex);
+
+       pr_debug("initialization successful.\n");
+
+       return 0;
+
+release_irq:
+       mei_disable_interrupts(dev);
+       flush_scheduled_work();
+       free_irq(pdev->irq, dev);
+disable_msi:
+       pci_disable_msi(pdev);
+       pci_iounmap(pdev, hw->mem_addr);
+free_device:
+       kfree(dev);
+release_regions:
+       pci_release_regions(pdev);
+disable_device:
+       pci_disable_device(pdev);
+end:
+       mutex_unlock(&mei_mutex);
+       dev_err(&pdev->dev, "initialization failed.\n");
+       return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_remove(struct pci_dev *pdev)
+{
+       struct mei_device *dev;
+       struct mei_me_hw *hw;
+
+       if (mei_pdev != pdev)
+               return;
+
+       dev = pci_get_drvdata(pdev);
+       if (!dev)
+               return;
+
+       hw = to_me_hw(dev);
+
+       mutex_lock(&dev->device_lock);
+
+       cancel_delayed_work(&dev->timer_work);
+
+       mei_wd_stop(dev);
+
+       mei_pdev = NULL;
+
+       if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+               dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+               mei_cl_disconnect(&dev->iamthif_cl);
+       }
+       if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+               dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+               mei_cl_disconnect(&dev->wd_cl);
+       }
+
+       /* Unregistering watchdog device */
+       mei_watchdog_unregister(dev);
+
+       /* remove entry if already in list */
+       dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+
+       if (dev->open_handle_count > 0)
+               dev->open_handle_count--;
+       mei_cl_unlink(&dev->wd_cl);
+
+       if (dev->open_handle_count > 0)
+               dev->open_handle_count--;
+       mei_cl_unlink(&dev->iamthif_cl);
+
+       dev->iamthif_current_cb = NULL;
+       dev->me_clients_num = 0;
+
+       mutex_unlock(&dev->device_lock);
+
+       flush_scheduled_work();
+
+       /* disable interrupts */
+       mei_disable_interrupts(dev);
+
+       free_irq(pdev->irq, dev);
+       pci_disable_msi(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       if (hw->mem_addr)
+               pci_iounmap(pdev, hw->mem_addr);
+
+       kfree(dev);
+
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+
+       mei_deregister();
+
+}
+#ifdef CONFIG_PM
+static int mei_pci_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct mei_device *dev = pci_get_drvdata(pdev);
+       int err;
+
+       if (!dev)
+               return -ENODEV;
+       mutex_lock(&dev->device_lock);
+
+       cancel_delayed_work(&dev->timer_work);
+
+       /* Stop watchdog if exists */
+       err = mei_wd_stop(dev);
+       /* Set new mei state */
+       if (dev->dev_state == MEI_DEV_ENABLED ||
+           dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
+               dev->dev_state = MEI_DEV_POWER_DOWN;
+               mei_reset(dev, 0);
+       }
+       mutex_unlock(&dev->device_lock);
+
+       free_irq(pdev->irq, dev);
+       pci_disable_msi(pdev);
+
+       return err;
+}
+
+static int mei_pci_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct mei_device *dev;
+       int err;
+
+       dev = pci_get_drvdata(pdev);
+       if (!dev)
+               return -ENODEV;
+
+       pci_enable_msi(pdev);
+
+       /* request and enable interrupt */
+       if (pci_dev_msi_enabled(pdev))
+               err = request_threaded_irq(pdev->irq,
+                       NULL,
+                       mei_me_irq_thread_handler,
+                       IRQF_ONESHOT, KBUILD_MODNAME, dev);
+       else
+               err = request_threaded_irq(pdev->irq,
+                       mei_me_irq_quick_handler,
+                       mei_me_irq_thread_handler,
+                       IRQF_SHARED, KBUILD_MODNAME, dev);
+
+       if (err) {
+               dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+                               pdev->irq);
+               return err;
+       }
+
+       mutex_lock(&dev->device_lock);
+       dev->dev_state = MEI_DEV_POWER_UP;
+       mei_reset(dev, 1);
+       mutex_unlock(&dev->device_lock);
+
+       /* Start timer if stopped in suspend */
+       schedule_delayed_work(&dev->timer_work, HZ);
+
+       return err;
+}
+static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
+#define MEI_PM_OPS     (&mei_pm_ops)
+#else
+#define MEI_PM_OPS     NULL
+#endif /* CONFIG_PM */
+/*
+ *  PCI driver structure
+ */
+static struct pci_driver mei_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = mei_pci_tbl,
+       .probe = mei_probe,
+       .remove = mei_remove,
+       .shutdown = mei_remove,
+       .driver.pm = MEI_PM_OPS,
+};
+
+module_pci_driver(mei_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
+MODULE_LICENSE("GPL v2");
index 9299a8c..2413247 100644 (file)
 #include <linux/sched.h>
 #include <linux/watchdog.h>
 
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
 #include <linux/mei.h>
 
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
+
 static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
 static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
 
@@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
  */
 int mei_wd_host_init(struct mei_device *dev)
 {
-       int id;
-       mei_cl_init(&dev->wd_cl, dev);
+       struct mei_cl *cl = &dev->wd_cl;
+       int i;
+       int ret;
+
+       mei_cl_init(cl, dev);
 
-       /* look for WD client and connect to it */
-       dev->wd_cl.state = MEI_FILE_DISCONNECTED;
        dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
        dev->wd_state = MEI_WD_IDLE;
 
-       /* Connect WD ME client to the host client */
-       id = mei_me_cl_link(dev, &dev->wd_cl,
-                               &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
 
-       if (id < 0) {
+       /* check for valid client id */
+       i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+       if (i < 0) {
                dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
                return -ENOENT;
        }
 
-       if (mei_connect(dev, &dev->wd_cl)) {
+       cl->me_client_id = dev->me_clients[i].client_id;
+
+       ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
+
+       if (ret < 0) {
+               dev_info(&dev->pdev->dev, "wd: failed link client\n");
+               return -ENOENT;
+       }
+
+       cl->state = MEI_FILE_CONNECTING;
+
+       if (mei_hbm_cl_connect_req(dev, cl)) {
                dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
-               dev->wd_cl.state = MEI_FILE_DISCONNECTED;
-               dev->wd_cl.host_client_id = 0;
+               cl->state = MEI_FILE_DISCONNECTED;
+               cl->host_client_id = 0;
                return -EIO;
        }
-       dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
+       cl->timer_count = MEI_CONNECT_TIMEOUT;
 
        return 0;
 }
@@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev)
  */
 int mei_wd_send(struct mei_device *dev)
 {
-       struct mei_msg_hdr *mei_hdr;
+       struct mei_msg_hdr hdr;
 
-       mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
-       mei_hdr->host_addr = dev->wd_cl.host_client_id;
-       mei_hdr->me_addr = dev->wd_cl.me_client_id;
-       mei_hdr->msg_complete = 1;
-       mei_hdr->reserved = 0;
+       hdr.host_addr = dev->wd_cl.host_client_id;
+       hdr.me_addr = dev->wd_cl.me_client_id;
+       hdr.msg_complete = 1;
+       hdr.reserved = 0;
 
        if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
-               mei_hdr->length = MEI_WD_START_MSG_SIZE;
+               hdr.length = MEI_WD_START_MSG_SIZE;
        else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
-               mei_hdr->length = MEI_WD_STOP_MSG_SIZE;
+               hdr.length = MEI_WD_STOP_MSG_SIZE;
        else
                return -EINVAL;
 
-       return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
+       return mei_write_message(dev, &hdr, dev->wd_data);
 }
 
 /**
@@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev)
 
        dev->wd_state = MEI_WD_STOPPING;
 
-       ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
+       ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
        if (ret < 0)
                goto out;
 
-       if (ret && dev->mei_host_buffer_is_empty) {
+       if (ret && dev->hbuf_is_ready) {
                ret = 0;
-               dev->mei_host_buffer_is_empty = false;
+               dev->hbuf_is_ready = false;
 
                if (!mei_wd_send(dev)) {
-                       ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
+                       ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
                        if (ret)
                                goto out;
                } else {
@@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
        dev->wd_state = MEI_WD_RUNNING;
 
        /* Check if we can send the ping to HW*/
-       if (dev->mei_host_buffer_is_empty &&
-               mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+       if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
 
-               dev->mei_host_buffer_is_empty = false;
+               dev->hbuf_is_ready = false;
                dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
 
                if (mei_wd_send(dev)) {
@@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
                        goto end;
                }
 
-               if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
+               if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
                        dev_err(&dev->pdev->dev,
-                               "wd: mei_flow_ctrl_reduce() failed.\n");
+                               "wd: mei_cl_flow_ctrl_reduce() failed.\n");
                        ret = -EIO;
                        goto end;
                }
index b90a224..0a14280 100644 (file)
@@ -240,7 +240,8 @@ void st_int_recv(void *disc_data,
        char *ptr;
        struct st_proto_s *proto;
        unsigned short payload_len = 0;
-       int len = 0, type = 0;
+       int len = 0;
+       unsigned char type = 0;
        unsigned char *plen;
        struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
        unsigned long flags;
index 9ff942a..83269f1 100644 (file)
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)
                if (pdata->chip_enable)
                        pdata->chip_enable(kim_gdata);
 
+               /* Configure BT nShutdown to HIGH state */
+               gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+               mdelay(5);      /* FIXME: a proper toggle */
+               gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+               mdelay(100);
                /* re-initialize the completion */
                INIT_COMPLETION(kim_gdata->ldisc_installed);
                /* send notification to UIM */
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)
  *     (b) upon failure to either install ldisc or download firmware.
  *     The function is responsible to (a) notify UIM about un-installation,
  *     (b) flush UART if the ldisc was installed.
- *     (c) invoke platform's chip disabling routine.
+ *     (c) reset BT_EN - pull down nshutdown at the end.
+ *     (d) invoke platform's chip disabling routine.
  */
 long st_kim_stop(void *kim_data)
 {
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)
                err = -ETIMEDOUT;
        }
 
+       /* By default configure BT nShutdown to LOW state */
+       gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+       mdelay(1);
+       gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+       mdelay(1);
+       gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
        /* platform specific disable */
        if (pdata->chip_disable)
                pdata->chip_disable(kim_gdata);
@@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev)
        /* refer to itself */
        kim_gdata->core_data->kim_data = kim_gdata;
 
+       /* Claim the chip enable nShutdown gpio from the system */
+       kim_gdata->nshutdown = pdata->nshutdown_gpio;
+       err = gpio_request(kim_gdata->nshutdown, "kim");
+       if (unlikely(err)) {
+               pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
+               return err;
+       }
+
+       /* Configure nShutdown GPIO as output=0 */
+       err = gpio_direction_output(kim_gdata->nshutdown, 0);
+       if (unlikely(err)) {
+               pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
+               return err;
+       }
        /* get reference of pdev for request_firmware
         */
        kim_gdata->kim_pdev = pdev;
@@ -779,10 +806,18 @@ err_core_init:
 
 static int kim_remove(struct platform_device *pdev)
 {
+       /* free the GPIOs requested */
+       struct ti_st_plat_data  *pdata = pdev->dev.platform_data;
        struct kim_data_s       *kim_gdata;
 
        kim_gdata = dev_get_drvdata(&pdev->dev);
 
+       /* Free the Bluetooth/FM/GPIO
+        * nShutdown gpio from the system
+        */
+       gpio_free(pdata->nshutdown_gpio);
+       pr_info("nshutdown GPIO Freed");
+
        debugfs_remove_recursive(kim_debugfs_dir);
        sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
        pr_info("sysfs entries removed");
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
new file mode 100644 (file)
index 0000000..39c2eca
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# VMware VMCI device
+#
+
+config VMWARE_VMCI
+       tristate "VMware VMCI Driver"
+       depends on X86 && PCI
+       help
+         This is VMware's Virtual Machine Communication Interface.  It enables
+         high-speed communication between host and guest in a virtual
+         environment via the VMCI virtual device.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called vmw_vmci.
diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile
new file mode 100644 (file)
index 0000000..4da9893
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
+vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
+       vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
+       vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
new file mode 100644 (file)
index 0000000..f866a4b
--- /dev/null
@@ -0,0 +1,1214 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+/*
+ * List of current VMCI contexts.  Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+ * These, along with context lookup, are protected by the
+ * list structure's lock.
+ */
+static struct {
+       struct list_head head;
+       spinlock_t lock; /* Spinlock for context list operations */
+} ctx_list = {
+       .head = LIST_HEAD_INIT(ctx_list.head),
+       .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
+};
+
+/* Used by contexts that did not set up notify flag pointers */
+static bool ctx_dummy_notify;
+
+static void ctx_signal_notify(struct vmci_ctx *context)
+{
+       *context->notify = true;
+}
+
+static void ctx_clear_notify(struct vmci_ctx *context)
+{
+       *context->notify = false;
+}
+
+/*
+ * If nothing requires the attention of the guest, clears both
+ * notify flag and call.
+ */
+static void ctx_clear_notify_call(struct vmci_ctx *context)
+{
+       if (context->pending_datagrams == 0 &&
+           vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
+               ctx_clear_notify(context);
+}
+
+/*
+ * Sets the context's notify flag iff datagrams are pending for this
+ * context.  Called from vmci_setup_notify().
+ */
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
+{
+       spin_lock(&context->lock);
+       if (context->pending_datagrams)
+               ctx_signal_notify(context);
+       spin_unlock(&context->lock);
+}
+
+/*
+ * Allocates and initializes a VMCI context.
+ */
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+                                uintptr_t event_hnd,
+                                int user_version,
+                                const struct cred *cred)
+{
+       struct vmci_ctx *context;
+       int error;
+
+       if (cid == VMCI_INVALID_ID) {
+               pr_devel("Invalid context ID for VMCI context\n");
+               error = -EINVAL;
+               goto err_out;
+       }
+
+       if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
+               pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
+                        priv_flags);
+               error = -EINVAL;
+               goto err_out;
+       }
+
+       if (user_version == 0) {
+               pr_devel("Invalid suer_version %d\n", user_version);
+               error = -EINVAL;
+               goto err_out;
+       }
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context) {
+               pr_warn("Failed to allocate memory for VMCI context\n");
+               error = -EINVAL;
+               goto err_out;
+       }
+
+       kref_init(&context->kref);
+       spin_lock_init(&context->lock);
+       INIT_LIST_HEAD(&context->list_item);
+       INIT_LIST_HEAD(&context->datagram_queue);
+       INIT_LIST_HEAD(&context->notifier_list);
+
+       /* Initialize host-specific VMCI context. */
+       init_waitqueue_head(&context->host_context.wait_queue);
+
+       context->queue_pair_array = vmci_handle_arr_create(0);
+       if (!context->queue_pair_array) {
+               error = -ENOMEM;
+               goto err_free_ctx;
+       }
+
+       context->doorbell_array = vmci_handle_arr_create(0);
+       if (!context->doorbell_array) {
+               error = -ENOMEM;
+               goto err_free_qp_array;
+       }
+
+       context->pending_doorbell_array = vmci_handle_arr_create(0);
+       if (!context->pending_doorbell_array) {
+               error = -ENOMEM;
+               goto err_free_db_array;
+       }
+
+       context->user_version = user_version;
+
+       context->priv_flags = priv_flags;
+
+       if (cred)
+               context->cred = get_cred(cred);
+
+       context->notify = &ctx_dummy_notify;
+       context->notify_page = NULL;
+
+       /*
+        * If we collide with an existing context we generate a new
+        * and use it instead. The VMX will determine if regeneration
+        * is okay. Since there isn't 4B - 16 VMs running on a given
+        * host, the below loop will terminate.
+        */
+       spin_lock(&ctx_list.lock);
+
+       while (vmci_ctx_exists(cid)) {
+               /* We reserve the lowest 16 ids for fixed contexts. */
+               cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
+               if (cid == VMCI_INVALID_ID)
+                       cid = VMCI_RESERVED_CID_LIMIT;
+       }
+       context->cid = cid;
+
+       list_add_tail_rcu(&context->list_item, &ctx_list.head);
+       spin_unlock(&ctx_list.lock);
+
+       return context;
+
+ err_free_db_array:
+       vmci_handle_arr_destroy(context->doorbell_array);
+ err_free_qp_array:
+       vmci_handle_arr_destroy(context->queue_pair_array);
+ err_free_ctx:
+       kfree(context);
+ err_out:
+       return ERR_PTR(error);
+}
+
+/*
+ * Destroy VMCI context.
+ */
+void vmci_ctx_destroy(struct vmci_ctx *context)
+{
+       spin_lock(&ctx_list.lock);
+       list_del_rcu(&context->list_item);
+       spin_unlock(&ctx_list.lock);
+       synchronize_rcu();
+
+       vmci_ctx_put(context);
+}
+
+/*
+ * Fire notification for all contexts interested in given cid.
+ */
+static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+{
+       u32 i, array_size;
+       struct vmci_ctx *sub_ctx;
+       struct vmci_handle_arr *subscriber_array;
+       struct vmci_handle context_handle =
+               vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+
+       /*
+        * We create an array to hold the subscribers we find when
+        * scanning through all contexts.
+        */
+       subscriber_array = vmci_handle_arr_create(0);
+       if (subscriber_array == NULL)
+               return VMCI_ERROR_NO_MEM;
+
+       /*
+        * Scan all contexts to find who is interested in being
+        * notified about given contextID.
+        */
+       rcu_read_lock();
+       list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
+               struct vmci_handle_list *node;
+
+               /*
+                * We only deliver notifications of the removal of
+                * contexts, if the two contexts are allowed to
+                * interact.
+                */
+               if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
+                       continue;
+
+               list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
+                       if (!vmci_handle_is_equal(node->handle, context_handle))
+                               continue;
+
+                       vmci_handle_arr_append_entry(&subscriber_array,
+                                       vmci_make_handle(sub_ctx->cid,
+                                                        VMCI_EVENT_HANDLER));
+               }
+       }
+       rcu_read_unlock();
+
+       /* Fire event to all subscribers. */
+       array_size = vmci_handle_arr_get_size(subscriber_array);
+       for (i = 0; i < array_size; i++) {
+               int result;
+               struct vmci_event_ctx ev;
+
+               ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
+               ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                                 VMCI_CONTEXT_RESOURCE_ID);
+               ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+               ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
+               ev.payload.context_id = context_id;
+
+               result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+                                               &ev.msg.hdr, false);
+               if (result < VMCI_SUCCESS) {
+                       pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
+                                ev.msg.event_data.event,
+                                ev.msg.hdr.dst.context);
+                       /* We continue to enqueue on next subscriber. */
+               }
+       }
+       vmci_handle_arr_destroy(subscriber_array);
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Returns the current number of pending datagrams. The call may
+ * also serve as a synchronization point for the datagram queue,
+ * as no enqueue operations can occur concurrently.
+ */
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
+{
+       struct vmci_ctx *context;
+
+       context = vmci_ctx_get(cid);
+       if (context == NULL)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       spin_lock(&context->lock);
+       if (pending)
+               *pending = context->pending_datagrams;
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Queues a VMCI datagram for the appropriate target VM context.
+ */
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
+{
+       struct vmci_datagram_queue_entry *dq_entry;
+       struct vmci_ctx *context;
+       struct vmci_handle dg_src;
+       size_t vmci_dg_size;
+
+       vmci_dg_size = VMCI_DG_SIZE(dg);
+       if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
+               pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       /* Get the target VM's VMCI context. */
+       context = vmci_ctx_get(cid);
+       if (!context) {
+               pr_devel("Invalid context (ID=0x%x)\n", cid);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       /* Allocate guest call entry and add it to the target VM's queue. */
+       dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
+       if (dq_entry == NULL) {
+               pr_warn("Failed to allocate memory for datagram\n");
+               vmci_ctx_put(context);
+               return VMCI_ERROR_NO_MEM;
+       }
+       dq_entry->dg = dg;
+       dq_entry->dg_size = vmci_dg_size;
+       dg_src = dg->src;
+       INIT_LIST_HEAD(&dq_entry->list_item);
+
+       spin_lock(&context->lock);
+
+       /*
+        * We put a higher limit on datagrams from the hypervisor.  If
+        * the pending datagram is not from hypervisor, then we check
+        * if enqueueing it would exceed the
+        * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination.  If
+        * the pending datagram is from hypervisor, we allow it to be
+        * queued at the destination side provided we don't reach the
+        * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
+        */
+       if (context->datagram_queue_size + vmci_dg_size >=
+           VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
+           (!vmci_handle_is_equal(dg_src,
+                               vmci_make_handle
+                               (VMCI_HYPERVISOR_CONTEXT_ID,
+                                VMCI_CONTEXT_RESOURCE_ID)) ||
+            context->datagram_queue_size + vmci_dg_size >=
+            VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
+               spin_unlock(&context->lock);
+               vmci_ctx_put(context);
+               kfree(dq_entry);
+               pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
+               return VMCI_ERROR_NO_RESOURCES;
+       }
+
+       list_add(&dq_entry->list_item, &context->datagram_queue);
+       context->pending_datagrams++;
+       context->datagram_queue_size += vmci_dg_size;
+       ctx_signal_notify(context);
+       wake_up(&context->host_context.wait_queue);
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+
+       return vmci_dg_size;
+}
+
+/*
+ * Verifies whether a context with the specified context ID exists.
+ * FIXME: utility is dubious as no decisions can be reliably made
+ * using this data as context can appear and disappear at any time.
+ */
+bool vmci_ctx_exists(u32 cid)
+{
+       struct vmci_ctx *context;
+       bool exists = false;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
+               if (context->cid == cid) {
+                       exists = true;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+       return exists;
+}
+
+/*
+ * Retrieves VMCI context corresponding to the given cid.
+ */
+struct vmci_ctx *vmci_ctx_get(u32 cid)
+{
+       struct vmci_ctx *c, *context = NULL;
+
+       if (cid == VMCI_INVALID_ID)
+               return NULL;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
+               if (c->cid == cid) {
+                       /*
+                        * The context owner drops its own reference to the
+                        * context only after removing it from the list and
+                        * waiting for RCU grace period to expire. This
+                        * means that we are not about to increase the
+                        * reference count of something that is in the
+                        * process of being destroyed.
+                        */
+                       context = c;
+                       kref_get(&context->kref);
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return context;
+}
+
+/*
+ * Deallocates all parts of a context data structure. This
+ * function doesn't lock the context, because it assumes that
+ * the caller was holding the last reference to context.
+ */
+static void ctx_free_ctx(struct kref *kref)
+{
+       struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
+       struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
+       struct vmci_handle temp_handle;
+       struct vmci_handle_list *notifier, *tmp;
+
+       /*
+        * Fire event to all contexts interested in knowing this
+        * context is dying.
+        */
+       ctx_fire_notification(context->cid, context->priv_flags);
+
+       /*
+        * Cleanup all queue pair resources attached to context.  If
+        * the VM dies without cleaning up, this code will make sure
+        * that no resources are leaked.
+        */
+       temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+       while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
+               if (vmci_qp_broker_detach(temp_handle,
+                                         context) < VMCI_SUCCESS) {
+                       /*
+                        * When vmci_qp_broker_detach() succeeds it
+                        * removes the handle from the array.  If
+                        * detach fails, we must remove the handle
+                        * ourselves.
+                        */
+                       vmci_handle_arr_remove_entry(context->queue_pair_array,
+                                                    temp_handle);
+               }
+               temp_handle =
+                   vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+       }
+
+       /*
+        * It is fine to destroy this without locking the callQueue, as
+        * this is the only thread having a reference to the context.
+        */
+       list_for_each_entry_safe(dq_entry, dq_entry_tmp,
+                                &context->datagram_queue, list_item) {
+               WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
+               list_del(&dq_entry->list_item);
+               kfree(dq_entry->dg);
+               kfree(dq_entry);
+       }
+
+       list_for_each_entry_safe(notifier, tmp,
+                                &context->notifier_list, node) {
+               list_del(&notifier->node);
+               kfree(notifier);
+       }
+
+       vmci_handle_arr_destroy(context->queue_pair_array);
+       vmci_handle_arr_destroy(context->doorbell_array);
+       vmci_handle_arr_destroy(context->pending_doorbell_array);
+       vmci_ctx_unset_notify(context);
+       if (context->cred)
+               put_cred(context->cred);
+       kfree(context);
+}
+
+/*
+ * Drops reference to VMCI context. If this is the last reference to
+ * the context it will be deallocated. A context is created with
+ * a reference count of one, and on destroy, it is removed from
+ * the context list before its reference count is decremented. Thus,
+ * if we reach zero, we are sure that nobody else are about to increment
+ * it (they need the entry in the context list for that), and so there
+ * is no need for locking.
+ */
+void vmci_ctx_put(struct vmci_ctx *context)
+{
+       kref_put(&context->kref, ctx_free_ctx);
+}
+
+/*
+ * Dequeues the next datagram and returns it to caller.
+ * The caller passes in a pointer to the max size datagram
+ * it can handle and the datagram is only unqueued if the
+ * size is less than max_size. If larger max_size is set to
+ * the size of the datagram to give the caller a chance to
+ * set up a larger buffer for the guestcall.
+ */
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+                             size_t *max_size,
+                             struct vmci_datagram **dg)
+{
+       struct vmci_datagram_queue_entry *dq_entry;
+       struct list_head *list_item;
+       int rv;
+
+       /* Dequeue the next datagram entry. */
+       spin_lock(&context->lock);
+       if (context->pending_datagrams == 0) {
+               ctx_clear_notify_call(context);
+               spin_unlock(&context->lock);
+               pr_devel("No datagrams pending\n");
+               return VMCI_ERROR_NO_MORE_DATAGRAMS;
+       }
+
+       list_item = context->datagram_queue.next;
+
+       dq_entry =
+           list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
+
+       /* Check size of caller's buffer. */
+       if (*max_size < dq_entry->dg_size) {
+               *max_size = dq_entry->dg_size;
+               spin_unlock(&context->lock);
+               pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
+                        (u32) *max_size);
+               return VMCI_ERROR_NO_MEM;
+       }
+
+       list_del(list_item);
+       context->pending_datagrams--;
+       context->datagram_queue_size -= dq_entry->dg_size;
+       if (context->pending_datagrams == 0) {
+               ctx_clear_notify_call(context);
+               rv = VMCI_SUCCESS;
+       } else {
+               /*
+                * Return the size of the next datagram.
+                */
+               struct vmci_datagram_queue_entry *next_entry;
+
+               list_item = context->datagram_queue.next;
+               next_entry =
+                   list_entry(list_item, struct vmci_datagram_queue_entry,
+                              list_item);
+
+               /*
+                * The following size_t -> int truncation is fine as
+                * the maximum size of a (routable) datagram is 68KB.
+                */
+               rv = (int)next_entry->dg_size;
+       }
+       spin_unlock(&context->lock);
+
+       /* Caller must free datagram. */
+       *dg = dq_entry->dg;
+       dq_entry->dg = NULL;
+       kfree(dq_entry);
+
+       return rv;
+}
+
+/*
+ * Reverts actions set up by vmci_setup_notify().  Unmaps and unlocks the
+ * page mapped/locked by vmci_setup_notify().
+ */
+void vmci_ctx_unset_notify(struct vmci_ctx *context)
+{
+       struct page *notify_page;
+
+       spin_lock(&context->lock);
+
+       notify_page = context->notify_page;
+       context->notify = &ctx_dummy_notify;
+       context->notify_page = NULL;
+
+       spin_unlock(&context->lock);
+
+       if (notify_page) {
+               kunmap(notify_page);
+               put_page(notify_page);
+       }
+}
+
+/*
+ * Add remote_cid to list of contexts current contexts wants
+ * notifications from/about.
+ */
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+{
+       struct vmci_ctx *context;
+       struct vmci_handle_list *notifier, *n;
+       int result;
+       bool exists = false;
+
+       context = vmci_ctx_get(context_id);
+       if (!context)
+               return VMCI_ERROR_NOT_FOUND;
+
+       if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
+               pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
+                        context_id, remote_cid);
+               result = VMCI_ERROR_DST_UNREACHABLE;
+               goto out;
+       }
+
+       if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+               result = VMCI_ERROR_NO_ACCESS;
+               goto out;
+       }
+
+       notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
+       if (!notifier) {
+               result = VMCI_ERROR_NO_MEM;
+               goto out;
+       }
+
+       INIT_LIST_HEAD(&notifier->node);
+       notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+       spin_lock(&context->lock);
+
+       list_for_each_entry(n, &context->notifier_list, node) {
+               if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+                       exists = true;
+                       break;
+               }
+       }
+
+       if (exists) {
+               kfree(notifier);
+               result = VMCI_ERROR_ALREADY_EXISTS;
+       } else {
+               list_add_tail_rcu(&notifier->node, &context->notifier_list);
+               context->n_notifiers++;
+               result = VMCI_SUCCESS;
+       }
+
+       spin_unlock(&context->lock);
+
+ out:
+       vmci_ctx_put(context);
+       return result;
+}
+
+/*
+ * Remove remote_cid from current context's list of contexts it is
+ * interested in getting notifications from/about.
+ */
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
+{
+       struct vmci_ctx *context;
+       struct vmci_handle_list *notifier, *tmp;
+       struct vmci_handle handle;
+       bool found = false;
+
+       context = vmci_ctx_get(context_id);
+       if (!context)
+               return VMCI_ERROR_NOT_FOUND;
+
+       handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+       spin_lock(&context->lock);
+       list_for_each_entry_safe(notifier, tmp,
+                                &context->notifier_list, node) {
+               if (vmci_handle_is_equal(notifier->handle, handle)) {
+                       list_del_rcu(&notifier->node);
+                       context->n_notifiers--;
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock(&context->lock);
+
+       if (found) {
+               synchronize_rcu();
+               kfree(notifier);
+       }
+
+       vmci_ctx_put(context);
+
+       return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+}
+
+static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
+                                       u32 *buf_size, void **pbuf)
+{
+       u32 *notifiers;
+       size_t data_size;
+       struct vmci_handle_list *entry;
+       int i = 0;
+
+       if (context->n_notifiers == 0) {
+               *buf_size = 0;
+               *pbuf = NULL;
+               return VMCI_SUCCESS;
+       }
+
+       data_size = context->n_notifiers * sizeof(*notifiers);
+       if (*buf_size < data_size) {
+               *buf_size = data_size;
+               return VMCI_ERROR_MORE_DATA;
+       }
+
+       notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
+       if (!notifiers)
+               return VMCI_ERROR_NO_MEM;
+
+       list_for_each_entry(entry, &context->notifier_list, node)
+               notifiers[i++] = entry->handle.context;
+
+       *buf_size = data_size;
+       *pbuf = notifiers;
+       return VMCI_SUCCESS;
+}
+
+static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+                                       u32 *buf_size, void **pbuf)
+{
+       struct dbell_cpt_state *dbells;
+       size_t n_doorbells;
+       int i;
+
+       n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+       if (n_doorbells > 0) {
+               size_t data_size = n_doorbells * sizeof(*dbells);
+               if (*buf_size < data_size) {
+                       *buf_size = data_size;
+                       return VMCI_ERROR_MORE_DATA;
+               }
+
+               dbells = kmalloc(data_size, GFP_ATOMIC);
+               if (!dbells)
+                       return VMCI_ERROR_NO_MEM;
+
+               for (i = 0; i < n_doorbells; i++)
+                       dbells[i].handle = vmci_handle_arr_get_entry(
+                                               context->doorbell_array, i);
+
+               *buf_size = data_size;
+               *pbuf = dbells;
+       } else {
+               *buf_size = 0;
+               *pbuf = NULL;
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Get current context's checkpoint state of given type.
+ */
+int vmci_ctx_get_chkpt_state(u32 context_id,
+                            u32 cpt_type,
+                            u32 *buf_size,
+                            void **pbuf)
+{
+       struct vmci_ctx *context;
+       int result;
+
+       context = vmci_ctx_get(context_id);
+       if (!context)
+               return VMCI_ERROR_NOT_FOUND;
+
+       spin_lock(&context->lock);
+
+       switch (cpt_type) {
+       case VMCI_NOTIFICATION_CPT_STATE:
+               result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
+               break;
+
+       case VMCI_WELLKNOWN_CPT_STATE:
+               /*
+                * For compatibility with VMX'en with VM to VM communication, we
+                * always return zero wellknown handles.
+                */
+
+               *buf_size = 0;
+               *pbuf = NULL;
+               result = VMCI_SUCCESS;
+               break;
+
+       case VMCI_DOORBELL_CPT_STATE:
+               result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
+               break;
+
+       default:
+               pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+               result = VMCI_ERROR_INVALID_ARGS;
+               break;
+       }
+
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+
+       return result;
+}
+
+/*
+ * Set current context's checkpoint state of given type.
+ */
+int vmci_ctx_set_chkpt_state(u32 context_id,
+                            u32 cpt_type,
+                            u32 buf_size,
+                            void *cpt_buf)
+{
+       u32 i;
+       u32 current_id;
+       int result = VMCI_SUCCESS;
+       u32 num_ids = buf_size / sizeof(u32);
+
+       if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
+               /*
+                * We would end up here if VMX with VM to VM communication
+                * attempts to restore a checkpoint with wellknown handles.
+                */
+               pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
+               return VMCI_ERROR_OBSOLETE;
+       }
+
+       if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
+               pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
+               current_id = ((u32 *)cpt_buf)[i];
+               result = vmci_ctx_add_notification(context_id, current_id);
+               if (result != VMCI_SUCCESS)
+                       break;
+       }
+       if (result != VMCI_SUCCESS)
+               pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
+                        cpt_type, result);
+
+       return result;
+}
+
+/*
+ * Retrieves the specified context's pending notifications in the
+ * form of a handle array. The handle arrays returned are the
+ * actual data - not a copy and should not be modified by the
+ * caller. They must be released using
+ * vmci_ctx_rcv_notifications_release.
+ */
+int vmci_ctx_rcv_notifications_get(u32 context_id,
+                                  struct vmci_handle_arr **db_handle_array,
+                                  struct vmci_handle_arr **qp_handle_array)
+{
+       struct vmci_ctx *context;
+       int result = VMCI_SUCCESS;
+
+       context = vmci_ctx_get(context_id);
+       if (context == NULL)
+               return VMCI_ERROR_NOT_FOUND;
+
+       spin_lock(&context->lock);
+
+       *db_handle_array = context->pending_doorbell_array;
+       context->pending_doorbell_array = vmci_handle_arr_create(0);
+       if (!context->pending_doorbell_array) {
+               context->pending_doorbell_array = *db_handle_array;
+               *db_handle_array = NULL;
+               result = VMCI_ERROR_NO_MEM;
+       }
+       *qp_handle_array = NULL;
+
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+
+       return result;
+}
+
+/*
+ * Releases handle arrays with pending notifications previously
+ * retrieved using vmci_ctx_rcv_notifications_get. If the
+ * notifications were not successfully handed over to the guest,
+ * success must be false.
+ */
+void vmci_ctx_rcv_notifications_release(u32 context_id,
+                                       struct vmci_handle_arr *db_handle_array,
+                                       struct vmci_handle_arr *qp_handle_array,
+                                       bool success)
+{
+       struct vmci_ctx *context = vmci_ctx_get(context_id);
+
+       spin_lock(&context->lock);
+       if (!success) {
+               struct vmci_handle handle;
+
+               /*
+                * New notifications may have been added while we were not
+                * holding the context lock, so we transfer any new pending
+                * doorbell notifications to the old array, and reinstate the
+                * old array.
+                */
+
+               handle = vmci_handle_arr_remove_tail(
+                                       context->pending_doorbell_array);
+               while (!vmci_handle_is_invalid(handle)) {
+                       if (!vmci_handle_arr_has_entry(db_handle_array,
+                                                      handle)) {
+                               vmci_handle_arr_append_entry(
+                                               &db_handle_array, handle);
+                       }
+                       handle = vmci_handle_arr_remove_tail(
+                                       context->pending_doorbell_array);
+               }
+               vmci_handle_arr_destroy(context->pending_doorbell_array);
+               context->pending_doorbell_array = db_handle_array;
+               db_handle_array = NULL;
+       } else {
+               ctx_clear_notify_call(context);
+       }
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+
+       if (db_handle_array)
+               vmci_handle_arr_destroy(db_handle_array);
+
+       if (qp_handle_array)
+               vmci_handle_arr_destroy(qp_handle_array);
+}
+
+/*
+ * Registers that a new doorbell handle has been allocated by the
+ * context. Only doorbell handles registered can be notified.
+ */
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+{
+       struct vmci_ctx *context;
+       int result;
+
+       if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       context = vmci_ctx_get(context_id);
+       if (context == NULL)
+               return VMCI_ERROR_NOT_FOUND;
+
+       spin_lock(&context->lock);
+       if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+               vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+               result = VMCI_SUCCESS;
+       } else {
+               result = VMCI_ERROR_DUPLICATE_ENTRY;
+       }
+
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+
+       return result;
+}
+
+/*
+ * Unregisters a doorbell handle that was previously registered
+ * with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
+{
+       struct vmci_ctx *context;
+       struct vmci_handle removed_handle;
+
+       if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       context = vmci_ctx_get(context_id);
+       if (context == NULL)
+               return VMCI_ERROR_NOT_FOUND;
+
+       spin_lock(&context->lock);
+       removed_handle =
+           vmci_handle_arr_remove_entry(context->doorbell_array, handle);
+       vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
+       spin_unlock(&context->lock);
+
+       vmci_ctx_put(context);
+
+       return vmci_handle_is_invalid(removed_handle) ?
+           VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Unregisters all doorbell handles that were previously
+ * registered with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy_all(u32 context_id)
+{
+       struct vmci_ctx *context;
+       struct vmci_handle handle;
+
+       if (context_id == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       context = vmci_ctx_get(context_id);
+       if (context == NULL)
+               return VMCI_ERROR_NOT_FOUND;
+
+       spin_lock(&context->lock);
+       do {
+               struct vmci_handle_arr *arr = context->doorbell_array;
+               handle = vmci_handle_arr_remove_tail(arr);
+       } while (!vmci_handle_is_invalid(handle));
+       do {
+               struct vmci_handle_arr *arr = context->pending_doorbell_array;
+               handle = vmci_handle_arr_remove_tail(arr);
+       } while (!vmci_handle_is_invalid(handle));
+       spin_unlock(&context->lock);
+
+       vmci_ctx_put(context);
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Registers a notification of a doorbell handle initiated by the
+ * specified source context. The notification of doorbells are
+ * subject to the same isolation rules as datagram delivery. To
+ * allow host side senders of notifications a finer granularity
+ * of sender rights than those assigned to the sending context
+ * itself, the host context is required to specify a different
+ * set of privilege flags that will override the privileges of
+ * the source context.
+ */
+int vmci_ctx_notify_dbell(u32 src_cid,
+                         struct vmci_handle handle,
+                         u32 src_priv_flags)
+{
+       struct vmci_ctx *dst_context;
+       int result;
+
+       if (vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       /* Get the target VM's VMCI context. */
+       dst_context = vmci_ctx_get(handle.context);
+       if (!dst_context) {
+               pr_devel("Invalid context (ID=0x%x)\n", handle.context);
+               return VMCI_ERROR_NOT_FOUND;
+       }
+
+       if (src_cid != handle.context) {
+               u32 dst_priv_flags;
+
+               if (VMCI_CONTEXT_IS_VM(src_cid) &&
+                   VMCI_CONTEXT_IS_VM(handle.context)) {
+                       pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
+                                src_cid, handle.context);
+                       result = VMCI_ERROR_DST_UNREACHABLE;
+                       goto out;
+               }
+
+               result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
+               if (result < VMCI_SUCCESS) {
+                       pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
+                               handle.context, handle.resource);
+                       goto out;
+               }
+
+               if (src_cid != VMCI_HOST_CONTEXT_ID ||
+                   src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
+                       src_priv_flags = vmci_context_get_priv_flags(src_cid);
+               }
+
+               if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
+                       result = VMCI_ERROR_NO_ACCESS;
+                       goto out;
+               }
+       }
+
+       if (handle.context == VMCI_HOST_CONTEXT_ID) {
+               result = vmci_dbell_host_context_notify(src_cid, handle);
+       } else {
+               spin_lock(&dst_context->lock);
+
+               if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
+                                              handle)) {
+                       result = VMCI_ERROR_NOT_FOUND;
+               } else {
+                       if (!vmci_handle_arr_has_entry(
+                                       dst_context->pending_doorbell_array,
+                                       handle)) {
+                               vmci_handle_arr_append_entry(
+                                       &dst_context->pending_doorbell_array,
+                                       handle);
+
+                               ctx_signal_notify(dst_context);
+                               wake_up(&dst_context->host_context.wait_queue);
+
+                       }
+                       result = VMCI_SUCCESS;
+               }
+               spin_unlock(&dst_context->lock);
+       }
+
+ out:
+       vmci_ctx_put(dst_context);
+
+       return result;
+}
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
+{
+       return context && context->user_version >= VMCI_VERSION_HOSTQP;
+}
+
+/*
+ * Registers that a new queue pair handle has been allocated by
+ * the context.
+ */
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+{
+       int result;
+
+       if (context == NULL || vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+               vmci_handle_arr_append_entry(&context->queue_pair_array,
+                                            handle);
+               result = VMCI_SUCCESS;
+       } else {
+               result = VMCI_ERROR_DUPLICATE_ENTRY;
+       }
+
+       return result;
+}
+
+/*
+ * Unregisters a queue pair handle that was previously registered
+ * with vmci_ctx_qp_create.
+ */
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
+{
+       struct vmci_handle hndl;
+
+       if (context == NULL || vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
+
+       return vmci_handle_is_invalid(hndl) ?
+               VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Determines whether a given queue pair handle is registered
+ * with the given context.
+ */
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
+{
+       if (context == NULL || vmci_handle_is_invalid(handle))
+               return false;
+
+       return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
+}
+
+/*
+ * vmci_context_get_priv_flags() - Retrieve privilege flags.
+ * @context_id: The context ID of the VMCI context.
+ *
+ * Retrieves privilege flags of the given VMCI context ID.
+ */
+u32 vmci_context_get_priv_flags(u32 context_id)
+{
+       if (vmci_host_code_active()) {
+               u32 flags;
+               struct vmci_ctx *context;
+
+               context = vmci_ctx_get(context_id);
+               if (!context)
+                       return VMCI_LEAST_PRIVILEGE_FLAGS;
+
+               flags = context->priv_flags;
+               vmci_ctx_put(context);
+               return flags;
+       }
+       return VMCI_NO_PRIVILEGE_FLAGS;
+}
+EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
+
+/*
+ * vmci_is_context_owner() - Determimnes if user is the context owner
+ * @context_id: The context ID of the VMCI context.
+ * @uid:        The host user id (real kernel value).
+ *
+ * Determines whether a given UID is the owner of given VMCI context.
+ */
+bool vmci_is_context_owner(u32 context_id, kuid_t uid)
+{
+       bool is_owner = false;
+
+       if (vmci_host_code_active()) {
+               struct vmci_ctx *context = vmci_ctx_get(context_id);
+               if (context) {
+                       if (context->cred)
+                               is_owner = uid_eq(context->cred->uid, uid);
+                       vmci_ctx_put(context);
+               }
+       }
+
+       return is_owner;
+}
+EXPORT_SYMBOL_GPL(vmci_is_context_owner);
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644 (file)
index 0000000..24a88e6
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * VMware VMCI driver (vmciContext.h)
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_CONTEXT_H_
+#define _VMCI_CONTEXT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_datagram.h"
+
+/* Used to determine what checkpoint state to get and set. */
+enum {
+       VMCI_NOTIFICATION_CPT_STATE = 1,
+       VMCI_WELLKNOWN_CPT_STATE    = 2,
+       VMCI_DG_OUT_STATE           = 3,
+       VMCI_DG_IN_STATE            = 4,
+       VMCI_DG_IN_SIZE_STATE       = 5,
+       VMCI_DOORBELL_CPT_STATE     = 6,
+};
+
+/* Host specific struct used for signalling */
+struct vmci_host {
+       wait_queue_head_t wait_queue;
+};
+
+struct vmci_handle_list {
+       struct list_head node;
+       struct vmci_handle handle;
+};
+
+struct vmci_ctx {
+       struct list_head list_item;       /* For global VMCI list. */
+       u32 cid;
+       struct kref kref;
+       struct list_head datagram_queue;  /* Head of per VM queue. */
+       u32 pending_datagrams;
+       size_t datagram_queue_size;       /* Size of datagram queue in bytes. */
+
+       /*
+        * Version of the code that created
+        * this context; e.g., VMX.
+        */
+       int user_version;
+       spinlock_t lock;  /* Locks callQueue and handle_arrays. */
+
+       /*
+        * queue_pairs attached to.  The array of
+        * handles for queue pairs is accessed
+        * from the code for QP API, and there
+        * it is protected by the QP lock.  It
+        * is also accessed from the context
+        * clean up path, which does not
+        * require a lock.  VMCILock is not
+        * used to protect the QP array field.
+        */
+       struct vmci_handle_arr *queue_pair_array;
+
+       /* Doorbells created by context. */
+       struct vmci_handle_arr *doorbell_array;
+
+       /* Doorbells pending for context. */
+       struct vmci_handle_arr *pending_doorbell_array;
+
+       /* Contexts current context is subscribing to. */
+       struct list_head notifier_list;
+       unsigned int n_notifiers;
+
+       struct vmci_host host_context;
+       u32 priv_flags;
+
+       const struct cred *cred;
+       bool *notify;           /* Notify flag pointer - hosted only. */
+       struct page *notify_page;       /* Page backing the notify UVA. */
+};
+
+/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
+struct vmci_ctx_info {
+       u32 remote_cid;
+       int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+       u64 cpt_buf;
+       u32 cpt_type;
+       u32 buf_size;
+       s32 result;
+       u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to recieve pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+       u64 db_handle_buf_uva;
+       u64 db_handle_buf_size;
+       u64 qp_handle_buf_uva;
+       u64 qp_handle_buf_size;
+       s32 result;
+       u32 _pad;
+};
+
+/*
+ * Utilility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+       return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+               !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+              ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+               !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+                                uintptr_t event_hnd, int version,
+                                const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+                             size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+                            u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+                            u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+                         u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+                                  **db_handle_array, struct vmci_handle_arr
+                                  **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+                                       *db_handle_array, struct vmci_handle_arr
+                                       *qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+       if (!context)
+               return VMCI_INVALID_ID;
+       return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644 (file)
index 0000000..ed5c433
--- /dev/null
@@ -0,0 +1,500 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * struct datagram_entry describes the datagram entity. It is used for datagram
+ * entities created only on the host.
+ */
+struct datagram_entry {
+       struct vmci_resource resource;
+       u32 flags;
+       bool run_delayed;
+       vmci_datagram_recv_cb recv_cb;
+       void *client_data;
+       u32 priv_flags;
+};
+
+struct delayed_datagram_info {
+       struct datagram_entry *entry;
+       struct vmci_datagram msg;
+       struct work_struct work;
+       bool in_dg_host_queue;
+};
+
+/* Number of in-flight host->host datagrams */
+static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
+
+/*
+ * Create a datagram entry given a handle pointer.
+ */
+static int dg_create_handle(u32 resource_id,
+                           u32 flags,
+                           u32 priv_flags,
+                           vmci_datagram_recv_cb recv_cb,
+                           void *client_data, struct vmci_handle *out_handle)
+{
+       int result;
+       u32 context_id;
+       struct vmci_handle handle;
+       struct datagram_entry *entry;
+
+       if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
+               context_id = VMCI_INVALID_ID;
+       } else {
+               context_id = vmci_get_context_id();
+               if (context_id == VMCI_INVALID_ID)
+                       return VMCI_ERROR_NO_RESOURCES;
+       }
+
+       handle = vmci_make_handle(context_id, resource_id);
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               pr_warn("Failed allocating memory for datagram entry\n");
+               return VMCI_ERROR_NO_MEM;
+       }
+
+       entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
+       entry->flags = flags;
+       entry->recv_cb = recv_cb;
+       entry->client_data = client_data;
+       entry->priv_flags = priv_flags;
+
+       /* Make datagram resource live. */
+       result = vmci_resource_add(&entry->resource,
+                                  VMCI_RESOURCE_TYPE_DATAGRAM,
+                                  handle);
+       if (result != VMCI_SUCCESS) {
+               pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+                       handle.context, handle.resource, result);
+               kfree(entry);
+               return result;
+       }
+
+       *out_handle = vmci_resource_handle(&entry->resource);
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Internal utility function with the same purpose as
+ * vmci_datagram_get_priv_flags that also takes a context_id.
+ */
+static int vmci_datagram_get_priv_flags(u32 context_id,
+                                       struct vmci_handle handle,
+                                       u32 *priv_flags)
+{
+       if (context_id == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (context_id == VMCI_HOST_CONTEXT_ID) {
+               struct datagram_entry *src_entry;
+               struct vmci_resource *resource;
+
+               resource = vmci_resource_by_handle(handle,
+                                                  VMCI_RESOURCE_TYPE_DATAGRAM);
+               if (!resource)
+                       return VMCI_ERROR_INVALID_ARGS;
+
+               src_entry = container_of(resource, struct datagram_entry,
+                                        resource);
+               *priv_flags = src_entry->priv_flags;
+               vmci_resource_put(resource);
+       } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
+               *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
+       else
+               *priv_flags = vmci_context_get_priv_flags(context_id);
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dg_delayed_dispatch(struct work_struct *work)
+{
+       struct delayed_datagram_info *dg_info =
+                       container_of(work, struct delayed_datagram_info, work);
+
+       dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
+
+       vmci_resource_put(&dg_info->entry->resource);
+
+       if (dg_info->in_dg_host_queue)
+               atomic_dec(&delayed_dg_host_queue_size);
+
+       kfree(dg_info);
+}
+
+/*
+ * Dispatch datagram as a host, to the host, or other vm context. This
+ * function cannot dispatch to hypervisor context handlers. This should
+ * have been handled before we get here by vmci_datagram_dispatch.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+{
+       int retval;
+       size_t dg_size;
+       u32 src_priv_flags;
+
+       dg_size = VMCI_DG_SIZE(dg);
+
+       /* Host cannot send to the hypervisor. */
+       if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
+               return VMCI_ERROR_DST_UNREACHABLE;
+
+       /* Check that source handle matches sending context. */
+       if (dg->src.context != context_id) {
+               pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
+                        context_id, dg->src.context, dg->src.resource);
+               return VMCI_ERROR_NO_ACCESS;
+       }
+
+       /* Get hold of privileges of sending endpoint. */
+       retval = vmci_datagram_get_priv_flags(context_id, dg->src,
+                                             &src_priv_flags);
+       if (retval != VMCI_SUCCESS) {
+               pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
+                       dg->src.context, dg->src.resource);
+               return retval;
+       }
+
+       /* Determine if we should route to host or guest destination. */
+       if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
+               /* Route to host datagram entry. */
+               struct datagram_entry *dst_entry;
+               struct vmci_resource *resource;
+
+               if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+                   dg->dst.resource == VMCI_EVENT_HANDLER) {
+                       return vmci_event_dispatch(dg);
+               }
+
+               resource = vmci_resource_by_handle(dg->dst,
+                                                  VMCI_RESOURCE_TYPE_DATAGRAM);
+               if (!resource) {
+                       pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
+                                dg->dst.context, dg->dst.resource);
+                       return VMCI_ERROR_INVALID_RESOURCE;
+               }
+               dst_entry = container_of(resource, struct datagram_entry,
+                                        resource);
+               if (vmci_deny_interaction(src_priv_flags,
+                                         dst_entry->priv_flags)) {
+                       vmci_resource_put(resource);
+                       return VMCI_ERROR_NO_ACCESS;
+               }
+
+               /*
+                * If a VMCI datagram destined for the host is also sent by the
+                * host, we always run it delayed. This ensures that no locks
+                * are held when the datagram callback runs.
+                */
+               if (dst_entry->run_delayed ||
+                   dg->src.context == VMCI_HOST_CONTEXT_ID) {
+                       struct delayed_datagram_info *dg_info;
+
+                       if (atomic_add_return(1, &delayed_dg_host_queue_size)
+                           == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
+                               atomic_dec(&delayed_dg_host_queue_size);
+                               vmci_resource_put(resource);
+                               return VMCI_ERROR_NO_MEM;
+                       }
+
+                       dg_info = kmalloc(sizeof(*dg_info) +
+                                   (size_t) dg->payload_size, GFP_ATOMIC);
+                       if (!dg_info) {
+                               atomic_dec(&delayed_dg_host_queue_size);
+                               vmci_resource_put(resource);
+                               return VMCI_ERROR_NO_MEM;
+                       }
+
+                       dg_info->in_dg_host_queue = true;
+                       dg_info->entry = dst_entry;
+                       memcpy(&dg_info->msg, dg, dg_size);
+
+                       INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+                       schedule_work(&dg_info->work);
+                       retval = VMCI_SUCCESS;
+
+               } else {
+                       retval = dst_entry->recv_cb(dst_entry->client_data, dg);
+                       vmci_resource_put(resource);
+                       if (retval < VMCI_SUCCESS)
+                               return retval;
+               }
+       } else {
+               /* Route to destination VM context. */
+               struct vmci_datagram *new_dg;
+
+               if (context_id != dg->dst.context) {
+                       if (vmci_deny_interaction(src_priv_flags,
+                                                 vmci_context_get_priv_flags
+                                                 (dg->dst.context))) {
+                               return VMCI_ERROR_NO_ACCESS;
+                       } else if (VMCI_CONTEXT_IS_VM(context_id)) {
+                               /*
+                                * If the sending context is a VM, it
+                                * cannot reach another VM.
+                                */
+
+                               pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
+                                        context_id, dg->dst.context);
+                               return VMCI_ERROR_DST_UNREACHABLE;
+                       }
+               }
+
+               /* We make a copy to enqueue. */
+               new_dg = kmalloc(dg_size, GFP_KERNEL);
+               if (new_dg == NULL)
+                       return VMCI_ERROR_NO_MEM;
+
+               memcpy(new_dg, dg, dg_size);
+               retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
+               if (retval < VMCI_SUCCESS) {
+                       kfree(new_dg);
+                       return retval;
+               }
+       }
+
+       /*
+        * We currently truncate the size to signed 32 bits. This doesn't
+        * matter for this handler as it only support 4Kb messages.
+        */
+       return (int)dg_size;
+}
+
+/*
+ * Dispatch datagram as a guest, down through the VMX and potentially to
+ * the host.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_guest(struct vmci_datagram *dg)
+{
+       int retval;
+       struct vmci_resource *resource;
+
+       resource = vmci_resource_by_handle(dg->src,
+                                          VMCI_RESOURCE_TYPE_DATAGRAM);
+       if (!resource)
+               return VMCI_ERROR_NO_HANDLE;
+
+       retval = vmci_send_datagram(dg);
+       vmci_resource_put(resource);
+       return retval;
+}
+
+/*
+ * Dispatch datagram.  This will determine the routing for the datagram
+ * and dispatch it accordingly.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+int vmci_datagram_dispatch(u32 context_id,
+                          struct vmci_datagram *dg, bool from_guest)
+{
+       int retval;
+       enum vmci_route route;
+
+       BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
+
+       if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
+               pr_devel("Payload (size=%llu bytes) too big to send\n",
+                        (unsigned long long)dg->payload_size);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
+       if (retval < VMCI_SUCCESS) {
+               pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
+                        dg->src.context, dg->dst.context, retval);
+               return retval;
+       }
+
+       if (VMCI_ROUTE_AS_HOST == route) {
+               if (VMCI_INVALID_ID == context_id)
+                       context_id = VMCI_HOST_CONTEXT_ID;
+               return dg_dispatch_as_host(context_id, dg);
+       }
+
+       if (VMCI_ROUTE_AS_GUEST == route)
+               return dg_dispatch_as_guest(dg);
+
+       pr_warn("Unknown route (%d) for datagram\n", route);
+       return VMCI_ERROR_DST_UNREACHABLE;
+}
+
+/*
+ * Invoke the handler for the given datagram.  This is intended to be
+ * called only when acting as a guest and receiving a datagram from the
+ * virtual device.
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+       struct vmci_resource *resource;
+       struct datagram_entry *dst_entry;
+
+       resource = vmci_resource_by_handle(dg->dst,
+                                          VMCI_RESOURCE_TYPE_DATAGRAM);
+       if (!resource) {
+               pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+                        dg->dst.context, dg->dst.resource);
+               return VMCI_ERROR_NO_HANDLE;
+       }
+
+       dst_entry = container_of(resource, struct datagram_entry, resource);
+       if (dst_entry->run_delayed) {
+               struct delayed_datagram_info *dg_info;
+
+               dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+                                 GFP_ATOMIC);
+               if (!dg_info) {
+                       vmci_resource_put(resource);
+                       return VMCI_ERROR_NO_MEM;
+               }
+
+               dg_info->in_dg_host_queue = false;
+               dg_info->entry = dst_entry;
+               memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+               INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+               schedule_work(&dg_info->work);
+       } else {
+               dst_entry->recv_cb(dst_entry->client_data, dg);
+               vmci_resource_put(resource);
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id:        The resource ID.
+ * @flags:      Datagram Flags.
+ * @priv_flags: Privilege Flags.
+ * @recv_cb:    Callback when receiving datagrams.
+ * @client_data:        Pointer for a datagram_entry struct
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+                                    u32 flags,
+                                    u32 priv_flags,
+                                    vmci_datagram_recv_cb recv_cb,
+                                    void *client_data,
+                                    struct vmci_handle *out_handle)
+{
+       if (out_handle == NULL)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (recv_cb == NULL) {
+               pr_devel("Client callback needed when creating datagram\n");
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+                               client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id:        Resource ID.
+ * @flags:      Datagram Flags.
+ * @recv_cb:    Callback when receiving datagrams.
+ * @client_ata: Pointer for a datagram_entry struct
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it.  Same as vmci_datagram_create_handle_priv without the priviledge
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+                               u32 flags,
+                               vmci_datagram_recv_cb recv_cb,
+                               void *client_data,
+                               struct vmci_handle *out_handle)
+{
+       return vmci_datagram_create_handle_priv(
+               resource_id, flags,
+               VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+               recv_cb, client_data,
+               out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle:     vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * vmci_datagram_create_handle{,Priv} functions.
+ */
+int vmci_datagram_destroy_handle(struct vmci_handle handle)
+{
+       struct datagram_entry *entry;
+       struct vmci_resource *resource;
+
+       resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
+       if (!resource) {
+               pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
+                        handle.context, handle.resource);
+               return VMCI_ERROR_NOT_FOUND;
+       }
+
+       entry = container_of(resource, struct datagram_entry, resource);
+
+       vmci_resource_put(&entry->resource);
+       vmci_resource_remove(&entry->resource);
+       kfree(entry);
+
+       return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
+
+/*
+ * vmci_datagram_send() - Send a datagram
+ * @msg:        The datagram to send.
+ *
+ * Sends the provided datagram on its merry way.
+ */
+int vmci_datagram_send(struct vmci_datagram *msg)
+{
+       if (msg == NULL)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_send);
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644 (file)
index 0000000..eb4aab7
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DATAGRAM_H_
+#define _VMCI_DATAGRAM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vmci_context.h"
+
+#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
+
+/*
+ * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
+ * datagram queues. It is allocated in non-paged memory, as the
+ * content is accessed while holding a spinlock. The pending datagram
+ * itself may be allocated from paged memory. We shadow the size of
+ * the datagram in the non-paged queue entry as this size is used
+ * while holding the same spinlock as above.
+ */
+struct vmci_datagram_queue_entry {
+       struct list_head list_item;     /* For queuing. */
+       size_t dg_size;         /* Size of datagram. */
+       struct vmci_datagram *dg;       /* Pending datagram. */
+};
+
+/* VMCIDatagramSendRecvInfo */
+struct vmci_datagram_snd_rcv_info {
+       u64 addr;
+       u32 len;
+       s32 result;
+};
+
+/* Datagram API for non-public use. */
+int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
+                          bool from_guest);
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
+
+#endif /* _VMCI_DATAGRAM_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644 (file)
index 0000000..c3e8397
--- /dev/null
@@ -0,0 +1,604 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/completion.h>
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+
+#define VMCI_DOORBELL_INDEX_BITS       6
+#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
+#define VMCI_DOORBELL_HASH(_idx)       hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
+
+/*
+ * DoorbellEntry describes the a doorbell notification handle allocated by the
+ * host.
+ */
+struct dbell_entry {
+       struct vmci_resource resource;
+       struct hlist_node node;
+       struct work_struct work;
+       vmci_callback notify_cb;
+       void *client_data;
+       u32 idx;
+       u32 priv_flags;
+       bool run_delayed;
+       atomic_t active;        /* Only used by guest personality */
+};
+
+/* The VMCI index table keeps track of currently registered doorbells. */
+struct dbell_index_table {
+       spinlock_t lock;        /* Index table lock */
+       struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
+};
+
+static struct dbell_index_table vmci_doorbell_it = {
+       .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
+};
+
+/*
+ * The max_notify_idx is one larger than the currently known bitmap index in
+ * use, and is used to determine how much of the bitmap needs to be scanned.
+ */
+static u32 max_notify_idx;
+
+/*
+ * The notify_idx_count is used for determining whether there are free entries
+ * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
+ */
+static u32 notify_idx_count;
+
+/*
+ * The last_notify_idx_reserved is used to track the last index handed out - in
+ * the case where multiple handles share a notification index, we hand out
+ * indexes round robin based on last_notify_idx_reserved.
+ */
+static u32 last_notify_idx_reserved;
+
+/* This is a one entry cache used to by the index allocation. */
+static u32 last_notify_idx_released = PAGE_SIZE;
+
+
+/*
+ * Utility function that retrieves the privilege flags associated
+ * with a given doorbell handle. For guest endpoints, the
+ * privileges are determined by the context ID, but for host
+ * endpoints privileges are associated with the complete
+ * handle. Hypervisor endpoints are not yet supported.
+ */
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
+{
+       if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (handle.context == VMCI_HOST_CONTEXT_ID) {
+               struct dbell_entry *entry;
+               struct vmci_resource *resource;
+
+               resource = vmci_resource_by_handle(handle,
+                                                  VMCI_RESOURCE_TYPE_DOORBELL);
+               if (!resource)
+                       return VMCI_ERROR_NOT_FOUND;
+
+               entry = container_of(resource, struct dbell_entry, resource);
+               *priv_flags = entry->priv_flags;
+               vmci_resource_put(resource);
+       } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
+               /*
+                * Hypervisor endpoints for notifications are not
+                * supported (yet).
+                */
+               return VMCI_ERROR_INVALID_ARGS;
+       } else {
+               *priv_flags = vmci_context_get_priv_flags(handle.context);
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Find doorbell entry by bitmap index.
+ */
+static struct dbell_entry *dbell_index_table_find(u32 idx)
+{
+       u32 bucket = VMCI_DOORBELL_HASH(idx);
+       struct dbell_entry *dbell;
+       struct hlist_node *node;
+
+       hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+                            node) {
+               if (idx == dbell->idx)
+                       return dbell;
+       }
+
+       return NULL;
+}
+
+/*
+ * Add the given entry to the index table.  This willi take a reference to the
+ * entry's resource so that the entry is not deleted before it is removed from
+ * the * table.
+ */
+static void dbell_index_table_add(struct dbell_entry *entry)
+{
+       u32 bucket;
+       u32 new_notify_idx;
+
+       vmci_resource_get(&entry->resource);
+
+       spin_lock_bh(&vmci_doorbell_it.lock);
+
+       /*
+        * Below we try to allocate an index in the notification
+        * bitmap with "not too much" sharing between resources. If we
+        * use less that the full bitmap, we either add to the end if
+        * there are no unused flags within the currently used area,
+        * or we search for unused ones. If we use the full bitmap, we
+        * allocate the index round robin.
+        */
+       if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
+               if (last_notify_idx_released < max_notify_idx &&
+                   !dbell_index_table_find(last_notify_idx_released)) {
+                       new_notify_idx = last_notify_idx_released;
+                       last_notify_idx_released = PAGE_SIZE;
+               } else {
+                       bool reused = false;
+                       new_notify_idx = last_notify_idx_reserved;
+                       if (notify_idx_count + 1 < max_notify_idx) {
+                               do {
+                                       if (!dbell_index_table_find
+                                           (new_notify_idx)) {
+                                               reused = true;
+                                               break;
+                                       }
+                                       new_notify_idx = (new_notify_idx + 1) %
+                                           max_notify_idx;
+                               } while (new_notify_idx !=
+                                        last_notify_idx_released);
+                       }
+                       if (!reused) {
+                               new_notify_idx = max_notify_idx;
+                               max_notify_idx++;
+                       }
+               }
+       } else {
+               new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
+       }
+
+       last_notify_idx_reserved = new_notify_idx;
+       notify_idx_count++;
+
+       entry->idx = new_notify_idx;
+       bucket = VMCI_DOORBELL_HASH(entry->idx);
+       hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
+
+       spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Remove the given entry from the index table.  This will release() the
+ * entry's resource.
+ */
+static void dbell_index_table_remove(struct dbell_entry *entry)
+{
+       spin_lock_bh(&vmci_doorbell_it.lock);
+
+       hlist_del_init(&entry->node);
+
+       notify_idx_count--;
+       if (entry->idx == max_notify_idx - 1) {
+               /*
+                * If we delete an entry with the maximum known
+                * notification index, we take the opportunity to
+                * prune the current max. As there might be other
+                * unused indices immediately below, we lower the
+                * maximum until we hit an index in use.
+                */
+               while (max_notify_idx > 0 &&
+                      !dbell_index_table_find(max_notify_idx - 1))
+                       max_notify_idx--;
+       }
+
+       last_notify_idx_released = entry->idx;
+
+       spin_unlock_bh(&vmci_doorbell_it.lock);
+
+       vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Creates a link between the given doorbell handle and the given
+ * index in the bitmap in the device backend. A notification state
+ * is created in hypervisor.
+ */
+static int dbell_link(struct vmci_handle handle, u32 notify_idx)
+{
+       struct vmci_doorbell_link_msg link_msg;
+
+       link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                           VMCI_DOORBELL_LINK);
+       link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+       link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
+       link_msg.handle = handle;
+       link_msg.notify_idx = notify_idx;
+
+       return vmci_send_datagram(&link_msg.hdr);
+}
+
+/*
+ * Unlinks the given doorbell handle from an index in the bitmap in
+ * the device backend. The notification state is destroyed in hypervisor.
+ */
+static int dbell_unlink(struct vmci_handle handle)
+{
+       struct vmci_doorbell_unlink_msg unlink_msg;
+
+       unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                             VMCI_DOORBELL_UNLINK);
+       unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+       unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
+       unlink_msg.handle = handle;
+
+       return vmci_send_datagram(&unlink_msg.hdr);
+}
+
+/*
+ * Notify another guest or the host.  We send a datagram down to the
+ * host via the hypervisor with the notification info.
+ */
+static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
+{
+       struct vmci_doorbell_notify_msg notify_msg;
+
+       notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                             VMCI_DOORBELL_NOTIFY);
+       notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+       notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
+       notify_msg.handle = handle;
+
+       return vmci_send_datagram(&notify_msg.hdr);
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dbell_delayed_dispatch(struct work_struct *work)
+{
+       struct dbell_entry *entry = container_of(work,
+                                                struct dbell_entry, work);
+
+       entry->notify_cb(entry->client_data);
+       vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Dispatches a doorbell notification to the host context.
+ */
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+{
+       struct dbell_entry *entry;
+       struct vmci_resource *resource;
+
+       if (vmci_handle_is_invalid(handle)) {
+               pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
+                        handle.context, handle.resource);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       resource = vmci_resource_by_handle(handle,
+                                          VMCI_RESOURCE_TYPE_DOORBELL);
+       if (!resource) {
+               pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
+                        handle.context, handle.resource);
+               return VMCI_ERROR_NOT_FOUND;
+       }
+
+       entry = container_of(resource, struct dbell_entry, resource);
+       if (entry->run_delayed) {
+               schedule_work(&entry->work);
+       } else {
+               entry->notify_cb(entry->client_data);
+               vmci_resource_put(resource);
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Register the notification bitmap with the host.
+ */
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+{
+       int result;
+       struct vmci_notify_bm_set_msg bitmap_set_msg;
+
+       bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                                 VMCI_SET_NOTIFY_BITMAP);
+       bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+       bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
+           VMCI_DG_HEADERSIZE;
+       bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+
+       result = vmci_send_datagram(&bitmap_set_msg.hdr);
+       if (result != VMCI_SUCCESS) {
+               pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+                        bitmap_ppn, result);
+               return false;
+       }
+       return true;
+}
+
+/*
+ * Executes or schedules the handlers for a given notify index.
+ */
+static void dbell_fire_entries(u32 notify_idx)
+{
+       u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
+       struct dbell_entry *dbell;
+       struct hlist_node *node;
+
+       spin_lock_bh(&vmci_doorbell_it.lock);
+
+       hlist_for_each_entry(dbell, node,
+                            &vmci_doorbell_it.entries[bucket], node) {
+               if (dbell->idx == notify_idx &&
+                   atomic_read(&dbell->active) == 1) {
+                       if (dbell->run_delayed) {
+                               vmci_resource_get(&dbell->resource);
+                               schedule_work(&dbell->work);
+                       } else {
+                               dbell->notify_cb(dbell->client_data);
+                       }
+               }
+       }
+
+       spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Scans the notification bitmap, collects pending notifications,
+ * resets the bitmap and invokes appropriate callbacks.
+ */
+void vmci_dbell_scan_notification_entries(u8 *bitmap)
+{
+       u32 idx;
+
+       for (idx = 0; idx < max_notify_idx; idx++) {
+               if (bitmap[idx] & 0x1) {
+                       bitmap[idx] &= ~1;
+                       dbell_fire_entries(idx);
+               }
+       }
+}
+
+/*
+ * vmci_doorbell_create() - Creates a doorbell
+ * @handle:     A handle used to track the resource.  Can be invalid.
+ * @flags:      Flag that determines context of callback.
+ * @priv_flags: Privileges flags.
+ * @notify_cb:  The callback to be ivoked when the doorbell fires.
+ * @client_data:        A parameter to be passed to the callback.
+ *
+ * Creates a doorbell with the given callback. If the handle is
+ * VMCI_INVALID_HANDLE, a free handle will be assigned, if
+ * possible. The callback can be run immediately (potentially with
+ * locks held - the default) or delayed (in a kernel thread) by
+ * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
+ * is selected, a given callback may not be run if the kernel is
+ * unable to allocate memory for the delayed execution (highly
+ * unlikely).
+ */
+int vmci_doorbell_create(struct vmci_handle *handle,
+                        u32 flags,
+                        u32 priv_flags,
+                        vmci_callback notify_cb, void *client_data)
+{
+       struct dbell_entry *entry;
+       struct vmci_handle new_handle;
+       int result;
+
+       if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
+           priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (entry == NULL) {
+               pr_warn("Failed allocating memory for datagram entry\n");
+               return VMCI_ERROR_NO_MEM;
+       }
+
+       if (vmci_handle_is_invalid(*handle)) {
+               u32 context_id = vmci_get_context_id();
+
+               /* Let resource code allocate a free ID for us */
+               new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+       } else {
+               bool valid_context = false;
+
+               /*
+                * Validate the handle.  We must do both of the checks below
+                * because we can be acting as both a host and a guest at the
+                * same time. We always allow the host context ID, since the
+                * host functionality is in practice always there with the
+                * unified driver.
+                */
+               if (handle->context == VMCI_HOST_CONTEXT_ID ||
+                   (vmci_guest_code_active() &&
+                    vmci_get_context_id() == handle->context)) {
+                       valid_context = true;
+               }
+
+               if (!valid_context || handle->resource == VMCI_INVALID_ID) {
+                       pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
+                                handle->context, handle->resource);
+                       result = VMCI_ERROR_INVALID_ARGS;
+                       goto free_mem;
+               }
+
+               new_handle = *handle;
+       }
+
+       entry->idx = 0;
+       INIT_HLIST_NODE(&entry->node);
+       entry->priv_flags = priv_flags;
+       INIT_WORK(&entry->work, dbell_delayed_dispatch);
+       entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
+       entry->notify_cb = notify_cb;
+       entry->client_data = client_data;
+       atomic_set(&entry->active, 0);
+
+       result = vmci_resource_add(&entry->resource,
+                                  VMCI_RESOURCE_TYPE_DOORBELL,
+                                  new_handle);
+       if (result != VMCI_SUCCESS) {
+               pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+                       new_handle.context, new_handle.resource, result);
+               goto free_mem;
+       }
+
+       new_handle = vmci_resource_handle(&entry->resource);
+       if (vmci_guest_code_active()) {
+               dbell_index_table_add(entry);
+               result = dbell_link(new_handle, entry->idx);
+               if (VMCI_SUCCESS != result)
+                       goto destroy_resource;
+
+               atomic_set(&entry->active, 1);
+       }
+
+       *handle = new_handle;
+
+       return result;
+
+ destroy_resource:
+       dbell_index_table_remove(entry);
+       vmci_resource_remove(&entry->resource);
+ free_mem:
+       kfree(entry);
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_create);
+
+/*
+ * vmci_doorbell_destroy() - Destroy a doorbell.
+ * @handle:     The handle tracking the resource.
+ *
+ * Destroys a doorbell previously created with vmcii_doorbell_create. This
+ * operation may block waiting for a callback to finish.
+ */
+int vmci_doorbell_destroy(struct vmci_handle handle)
+{
+       struct dbell_entry *entry;
+       struct vmci_resource *resource;
+
+       if (vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       resource = vmci_resource_by_handle(handle,
+                                          VMCI_RESOURCE_TYPE_DOORBELL);
+       if (!resource) {
+               pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
+                        handle.context, handle.resource);
+               return VMCI_ERROR_NOT_FOUND;
+       }
+
+       entry = container_of(resource, struct dbell_entry, resource);
+
+       if (vmci_guest_code_active()) {
+               int result;
+
+               dbell_index_table_remove(entry);
+
+               result = dbell_unlink(handle);
+               if (VMCI_SUCCESS != result) {
+
+                       /*
+                        * The only reason this should fail would be
+                        * an inconsistency between guest and
+                        * hypervisor state, where the guest believes
+                        * it has an active registration whereas the
+                        * hypervisor doesn't. One case where this may
+                        * happen is if a doorbell is unregistered
+                        * following a hibernation at a time where the
+                        * doorbell state hasn't been restored on the
+                        * hypervisor side yet. Since the handle has
+                        * now been removed in the guest, we just
+                        * print a warning and return success.
+                        */
+                       pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
+                                handle.context, handle.resource, result);
+               }
+       }
+
+       /*
+        * Now remove the resource from the table.  It might still be in use
+        * after this, in a callback or still on the delayed work queue.
+        */
+       vmci_resource_put(&entry->resource);
+       vmci_resource_remove(&entry->resource);
+
+       kfree(entry);
+
+       return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
+
+/*
+ * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
+ * @dst:        The handlle identifying the doorbell resource
+ * @priv_flags: Priviledge flags.
+ *
+ * Generates a notification on the doorbell identified by the
+ * handle. For host side generation of notifications, the caller
+ * can specify what the privilege of the calling side is.
+ */
+int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
+{
+       int retval;
+       enum vmci_route route;
+       struct vmci_handle src;
+
+       if (vmci_handle_is_invalid(dst) ||
+           (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       src = VMCI_INVALID_HANDLE;
+       retval = vmci_route(&src, &dst, false, &route);
+       if (retval < VMCI_SUCCESS)
+               return retval;
+
+       if (VMCI_ROUTE_AS_HOST == route)
+               return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
+                                            dst, priv_flags);
+
+       if (VMCI_ROUTE_AS_GUEST == route)
+               return dbell_notify_as_guest(dst, priv_flags);
+
+       pr_warn("Unknown route (%d) for doorbell\n", route);
+       return VMCI_ERROR_DST_UNREACHABLE;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
new file mode 100644 (file)
index 0000000..e4c0b17
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef VMCI_DOORBELL_H
+#define VMCI_DOORBELL_H
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_driver.h"
+
+/*
+ * VMCINotifyResourceInfo: Used to create and destroy doorbells, and
+ * generate a notification for a doorbell or queue pair.
+ */
+struct vmci_dbell_notify_resource_info {
+       struct vmci_handle handle;
+       u16 resource;
+       u16 action;
+       s32 result;
+};
+
+/*
+ * Structure used for checkpointing the doorbell mappings. It is
+ * written to the checkpoint as is, so changing this structure will
+ * break checkpoint compatibility.
+ */
+struct dbell_cpt_state {
+       struct vmci_handle handle;
+       u64 bitmap_idx;
+};
+
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
+
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+void vmci_dbell_scan_notification_entries(u8 *bitmap);
+
+#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
new file mode 100644 (file)
index 0000000..7b3fce2
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+static bool vmci_disable_host;
+module_param_named(disable_host, vmci_disable_host, bool, 0);
+MODULE_PARM_DESC(disable_host,
+                "Disable driver host personality (default=enabled)");
+
+static bool vmci_disable_guest;
+module_param_named(disable_guest, vmci_disable_guest, bool, 0);
+MODULE_PARM_DESC(disable_guest,
+                "Disable driver guest personality (default=enabled)");
+
+static bool vmci_guest_personality_initialized;
+static bool vmci_host_personality_initialized;
+
+/*
+ * vmci_get_context_id() - Gets the current context ID.
+ *
+ * Returns the current context ID.  Note that since this is accessed only
+ * from code running in the host, this always returns the host context ID.
+ */
+u32 vmci_get_context_id(void)
+{
+       if (vmci_guest_code_active())
+               return vmci_get_vm_context_id();
+       else if (vmci_host_code_active())
+               return VMCI_HOST_CONTEXT_ID;
+
+       return VMCI_INVALID_ID;
+}
+EXPORT_SYMBOL_GPL(vmci_get_context_id);
+
+static int __init vmci_drv_init(void)
+{
+       int vmci_err;
+       int error;
+
+       vmci_err = vmci_event_init();
+       if (vmci_err < VMCI_SUCCESS) {
+               pr_err("Failed to initialize VMCIEvent (result=%d)\n",
+                      vmci_err);
+               return -EINVAL;
+       }
+
+       if (!vmci_disable_guest) {
+               error = vmci_guest_init();
+               if (error) {
+                       pr_warn("Failed to initialize guest personality (err=%d)\n",
+                               error);
+               } else {
+                       vmci_guest_personality_initialized = true;
+                       pr_info("Guest personality initialized and is %s\n",
+                               vmci_guest_code_active() ?
+                               "active" : "inactive");
+               }
+       }
+
+       if (!vmci_disable_host) {
+               error = vmci_host_init();
+               if (error) {
+                       pr_warn("Unable to initialize host personality (err=%d)\n",
+                               error);
+               } else {
+                       vmci_host_personality_initialized = true;
+                       pr_info("Initialized host personality\n");
+               }
+       }
+
+       if (!vmci_guest_personality_initialized &&
+           !vmci_host_personality_initialized) {
+               vmci_event_exit();
+               return -ENODEV;
+       }
+
+       return 0;
+}
+module_init(vmci_drv_init);
+
+static void __exit vmci_drv_exit(void)
+{
+       if (vmci_guest_personality_initialized)
+               vmci_guest_exit();
+
+       if (vmci_host_personality_initialized)
+               vmci_host_exit();
+
+       vmci_event_exit();
+}
+module_exit(vmci_drv_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
+MODULE_VERSION("1.0.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644 (file)
index 0000000..f69156a
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DRIVER_H_
+#define _VMCI_DRIVER_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/wait.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_context.h"
+
+enum vmci_obj_type {
+       VMCIOBJ_VMX_VM = 10,
+       VMCIOBJ_CONTEXT,
+       VMCIOBJ_SOCKET,
+       VMCIOBJ_NOT_SET,
+};
+
+/* For storing VMCI structures in file handles. */
+struct vmci_obj {
+       void *ptr;
+       enum vmci_obj_type type;
+};
+
+u32 vmci_get_context_id(void);
+int vmci_send_datagram(struct vmci_datagram *dg);
+
+int vmci_host_init(void);
+void vmci_host_exit(void);
+bool vmci_host_code_active(void);
+
+int vmci_guest_init(void);
+void vmci_guest_exit(void);
+bool vmci_guest_code_active(void);
+u32 vmci_get_vm_context_id(void);
+
+#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
new file mode 100644 (file)
index 0000000..8449516
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define EVENT_MAGIC 0xEABE0000
+#define VMCI_EVENT_MAX_ATTEMPTS 10
+
+struct vmci_subscription {
+       u32 id;
+       u32 event;
+       vmci_event_cb callback;
+       void *callback_data;
+       struct list_head node;  /* on one of subscriber lists */
+};
+
+static struct list_head subscriber_array[VMCI_EVENT_MAX];
+static DEFINE_MUTEX(subscriber_mutex);
+
+int __init vmci_event_init(void)
+{
+       int i;
+
+       for (i = 0; i < VMCI_EVENT_MAX; i++)
+               INIT_LIST_HEAD(&subscriber_array[i]);
+
+       return VMCI_SUCCESS;
+}
+
+void vmci_event_exit(void)
+{
+       int e;
+
+       /* We free all memory at exit. */
+       for (e = 0; e < VMCI_EVENT_MAX; e++) {
+               struct vmci_subscription *cur, *p2;
+               list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
+
+                       /*
+                        * We should never get here because all events
+                        * should have been unregistered before we try
+                        * to unload the driver module.
+                        */
+                       pr_warn("Unexpected free events occurring\n");
+                       list_del(&cur->node);
+                       kfree(cur);
+               }
+       }
+}
+
+/*
+ * Find entry. Assumes subscriber_mutex is held.
+ */
+static struct vmci_subscription *event_find(u32 sub_id)
+{
+       int e;
+
+       for (e = 0; e < VMCI_EVENT_MAX; e++) {
+               struct vmci_subscription *cur;
+               list_for_each_entry(cur, &subscriber_array[e], node) {
+                       if (cur->id == sub_id)
+                               return cur;
+               }
+       }
+       return NULL;
+}
+
+/*
+ * Actually delivers the events to the subscribers.
+ * The callback function for each subscriber is invoked.
+ */
+static void event_deliver(struct vmci_event_msg *event_msg)
+{
+       struct vmci_subscription *cur;
+       struct list_head *subscriber_list;
+
+       rcu_read_lock();
+       subscriber_list = &subscriber_array[event_msg->event_data.event];
+       list_for_each_entry_rcu(cur, subscriber_list, node) {
+               cur->callback(cur->id, &event_msg->event_data,
+                             cur->callback_data);
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
+ * subscribers for given event.
+ */
+int vmci_event_dispatch(struct vmci_datagram *msg)
+{
+       struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
+
+       if (msg->payload_size < sizeof(u32) ||
+           msg->payload_size > sizeof(struct vmci_event_data_max))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (!VMCI_EVENT_VALID(event_msg->event_data.event))
+               return VMCI_ERROR_EVENT_UNKNOWN;
+
+       event_deliver(event_msg);
+       return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_event_subscribe() - Subscribe to a given event.
+ * @event:      The event to subscribe to.
+ * @callback:   The callback to invoke upon the event.
+ * @callback_data:      Data to pass to the callback.
+ * @subscription_id:    ID used to track subscription.  Used with
+ *              vmci_event_unsubscribe()
+ *
+ * Subscribes to the provided event. The callback specified will be
+ * fired from RCU critical section and therefore must not sleep.
+ */
+int vmci_event_subscribe(u32 event,
+                        vmci_event_cb callback,
+                        void *callback_data,
+                        u32 *new_subscription_id)
+{
+       struct vmci_subscription *sub;
+       int attempts;
+       int retval;
+       bool have_new_id = false;
+
+       if (!new_subscription_id) {
+               pr_devel("%s: Invalid subscription (NULL)\n", __func__);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       if (!VMCI_EVENT_VALID(event) || !callback) {
+               pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
+                        __func__, event, callback, callback_data);
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       sub = kzalloc(sizeof(*sub), GFP_KERNEL);
+       if (!sub)
+               return VMCI_ERROR_NO_MEM;
+
+       sub->id = VMCI_EVENT_MAX;
+       sub->event = event;
+       sub->callback = callback;
+       sub->callback_data = callback_data;
+       INIT_LIST_HEAD(&sub->node);
+
+       mutex_lock(&subscriber_mutex);
+
+       /* Creation of a new event is always allowed. */
+       for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
+               static u32 subscription_id;
+               /*
+                * We try to get an id a couple of time before
+                * claiming we are out of resources.
+                */
+
+               /* Test for duplicate id. */
+               if (!event_find(++subscription_id)) {
+                       sub->id = subscription_id;
+                       have_new_id = true;
+                       break;
+               }
+       }
+
+       if (have_new_id) {
+               list_add_rcu(&sub->node, &subscriber_array[event]);
+               retval = VMCI_SUCCESS;
+       } else {
+               retval = VMCI_ERROR_NO_RESOURCES;
+       }
+
+       mutex_unlock(&subscriber_mutex);
+
+       *new_subscription_id = sub->id;
+       return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_event_subscribe);
+
+/*
+ * vmci_event_unsubscribe() - unsubscribe from an event.
+ * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
+ *
+ * Unsubscribe from given event. Removes it from list and frees it.
+ * Will return callback_data if requested by caller.
+ */
+int vmci_event_unsubscribe(u32 sub_id)
+{
+       struct vmci_subscription *s;
+
+       mutex_lock(&subscriber_mutex);
+       s = event_find(sub_id);
+       if (s)
+               list_del_rcu(&s->node);
+       mutex_unlock(&subscriber_mutex);
+
+       if (!s)
+               return VMCI_ERROR_NOT_FOUND;
+
+       synchronize_rcu();
+       kfree(s);
+
+       return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h
new file mode 100644 (file)
index 0000000..7df9b1c
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMCI_EVENT_H__
+#define __VMCI_EVENT_H__
+
+#include <linux/vmw_vmci_api.h>
+
+int vmci_event_init(void);
+void vmci_event_exit(void);
+int vmci_event_dispatch(struct vmci_datagram *msg);
+
+#endif /*__VMCI_EVENT_H__ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644 (file)
index 0000000..60c0199
--- /dev/null
@@ -0,0 +1,759 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define PCI_VENDOR_ID_VMWARE           0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMCI      0x0740
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+static bool vmci_disable_msi;
+module_param_named(disable_msi, vmci_disable_msi, bool, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+static bool vmci_disable_msix;
+module_param_named(disable_msix, vmci_disable_msix, bool, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+static u32 ctx_update_sub_id = VMCI_INVALID_ID;
+static u32 vm_context_id = VMCI_INVALID_ID;
+
+struct vmci_guest_device {
+       struct device *dev;     /* PCI device we are attached to */
+       void __iomem *iobase;
+
+       unsigned int irq;
+       unsigned int intr_type;
+       bool exclusive_vectors;
+       struct msix_entry msix_entries[VMCI_MAX_INTRS];
+
+       struct tasklet_struct datagram_tasklet;
+       struct tasklet_struct bm_tasklet;
+
+       void *data_buffer;
+       void *notification_bitmap;
+};
+
+/* vmci_dev singleton device and supporting data*/
+static struct vmci_guest_device *vmci_dev_g;
+static DEFINE_SPINLOCK(vmci_dev_spinlock);
+
+static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
+
+bool vmci_guest_code_active(void)
+{
+       return atomic_read(&vmci_num_guest_devices) != 0;
+}
+
+u32 vmci_get_vm_context_id(void)
+{
+       if (vm_context_id == VMCI_INVALID_ID) {
+               struct vmci_datagram get_cid_msg;
+               get_cid_msg.dst =
+                   vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                    VMCI_GET_CONTEXT_ID);
+               get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
+               get_cid_msg.payload_size = 0;
+               vm_context_id = vmci_send_datagram(&get_cid_msg);
+       }
+       return vm_context_id;
+}
+
+/*
+ * VM to hypervisor call mechanism. We use the standard VMware naming
+ * convention since shared code is calling this function as well.
+ */
+int vmci_send_datagram(struct vmci_datagram *dg)
+{
+       unsigned long flags;
+       int result;
+
+       /* Check args. */
+       if (dg == NULL)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       /*
+        * Need to acquire spinlock on the device because the datagram
+        * data may be spread over multiple pages and the monitor may
+        * interleave device user rpc calls from multiple
+        * VCPUs. Acquiring the spinlock precludes that
+        * possibility. Disabling interrupts to avoid incoming
+        * datagrams during a "rep out" and possibly landing up in
+        * this function.
+        */
+       spin_lock_irqsave(&vmci_dev_spinlock, flags);
+
+       if (vmci_dev_g) {
+               iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
+                            dg, VMCI_DG_SIZE(dg));
+               result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+       } else {
+               result = VMCI_ERROR_UNAVAILABLE;
+       }
+
+       spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_send_datagram);
+
+/*
+ * Gets called with the new context id if updated or resumed.
+ * Context id.
+ */
+static void vmci_guest_cid_update(u32 sub_id,
+                                 const struct vmci_event_data *event_data,
+                                 void *client_data)
+{
+       const struct vmci_event_payld_ctx *ev_payload =
+                               vmci_event_data_const_payload(event_data);
+
+       if (sub_id != ctx_update_sub_id) {
+               pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
+               return;
+       }
+
+       if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
+               pr_devel("Invalid event data\n");
+               return;
+       }
+
+       pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
+                vm_context_id, ev_payload->context_id, event_data->event);
+
+       vm_context_id = ev_payload->context_id;
+}
+
+/*
+ * Verify that the host supports the hypercalls we need. If it does not,
+ * try to find fallback hypercalls and use those instead.  Returns
+ * true if required hypercalls (or fallback hypercalls) are
+ * supported by the host, false otherwise.
+ */
+static bool vmci_check_host_caps(struct pci_dev *pdev)
+{
+       bool result;
+       struct vmci_resource_query_msg *msg;
+       u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
+                               VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+       struct vmci_datagram *check_msg;
+
+       check_msg = kmalloc(msg_size, GFP_KERNEL);
+       if (!check_msg) {
+               dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+               return false;
+       }
+
+       check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                         VMCI_RESOURCES_QUERY);
+       check_msg->src = VMCI_ANON_SRC_HANDLE;
+       check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
+       msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
+
+       msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
+       msg->resources[0] = VMCI_GET_CONTEXT_ID;
+
+       /* Checks that hyper calls are supported */
+       result = vmci_send_datagram(check_msg) == 0x01;
+       kfree(check_msg);
+
+       dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
+               __func__, result ? "PASSED" : "FAILED");
+
+       /* We need the vector. There are no fallbacks. */
+       return result;
+}
+
+/*
+ * Reads datagrams from the data in port and dispatches them. We
+ * always start reading datagrams into only the first page of the
+ * datagram buffer. If the datagrams don't fit into one page, we
+ * use the maximum datagram buffer size for the remainder of the
+ * invocation. This is a simple heuristic for not penalizing
+ * small datagrams.
+ *
+ * This function assumes that it has exclusive access to the data
+ * in port for the duration of the call.
+ */
+static void vmci_dispatch_dgs(unsigned long data)
+{
+       struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+       u8 *dg_in_buffer = vmci_dev->data_buffer;
+       struct vmci_datagram *dg;
+       size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+       size_t current_dg_in_buffer_size = PAGE_SIZE;
+       size_t remaining_bytes;
+
+       BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
+
+       ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+                   vmci_dev->data_buffer, current_dg_in_buffer_size);
+       dg = (struct vmci_datagram *)dg_in_buffer;
+       remaining_bytes = current_dg_in_buffer_size;
+
+       while (dg->dst.resource != VMCI_INVALID_ID ||
+              remaining_bytes > PAGE_SIZE) {
+               unsigned dg_in_size;
+
+               /*
+                * When the input buffer spans multiple pages, a datagram can
+                * start on any page boundary in the buffer.
+                */
+               if (dg->dst.resource == VMCI_INVALID_ID) {
+                       dg = (struct vmci_datagram *)roundup(
+                               (uintptr_t)dg + 1, PAGE_SIZE);
+                       remaining_bytes =
+                               (size_t)(dg_in_buffer +
+                                        current_dg_in_buffer_size -
+                                        (u8 *)dg);
+                       continue;
+               }
+
+               dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
+
+               if (dg_in_size <= dg_in_buffer_size) {
+                       int result;
+
+                       /*
+                        * If the remaining bytes in the datagram
+                        * buffer doesn't contain the complete
+                        * datagram, we first make sure we have enough
+                        * room for it and then we read the reminder
+                        * of the datagram and possibly any following
+                        * datagrams.
+                        */
+                       if (dg_in_size > remaining_bytes) {
+                               if (remaining_bytes !=
+                                   current_dg_in_buffer_size) {
+
+                                       /*
+                                        * We move the partial
+                                        * datagram to the front and
+                                        * read the reminder of the
+                                        * datagram and possibly
+                                        * following calls into the
+                                        * following bytes.
+                                        */
+                                       memmove(dg_in_buffer, dg_in_buffer +
+                                               current_dg_in_buffer_size -
+                                               remaining_bytes,
+                                               remaining_bytes);
+                                       dg = (struct vmci_datagram *)
+                                           dg_in_buffer;
+                               }
+
+                               if (current_dg_in_buffer_size !=
+                                   dg_in_buffer_size)
+                                       current_dg_in_buffer_size =
+                                           dg_in_buffer_size;
+
+                               ioread8_rep(vmci_dev->iobase +
+                                               VMCI_DATA_IN_ADDR,
+                                       vmci_dev->data_buffer +
+                                               remaining_bytes,
+                                       current_dg_in_buffer_size -
+                                               remaining_bytes);
+                       }
+
+                       /*
+                        * We special case event datagrams from the
+                        * hypervisor.
+                        */
+                       if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+                           dg->dst.resource == VMCI_EVENT_HANDLER) {
+                               result = vmci_event_dispatch(dg);
+                       } else {
+                               result = vmci_datagram_invoke_guest_handler(dg);
+                       }
+                       if (result < VMCI_SUCCESS)
+                               dev_dbg(vmci_dev->dev,
+                                       "Datagram with resource (ID=0x%x) failed (err=%d)\n",
+                                        dg->dst.resource, result);
+
+                       /* On to the next datagram. */
+                       dg = (struct vmci_datagram *)((u8 *)dg +
+                                                     dg_in_size);
+               } else {
+                       size_t bytes_to_skip;
+
+                       /*
+                        * Datagram doesn't fit in datagram buffer of maximal
+                        * size. We drop it.
+                        */
+                       dev_dbg(vmci_dev->dev,
+                               "Failed to receive datagram (size=%u bytes)\n",
+                                dg_in_size);
+
+                       bytes_to_skip = dg_in_size - remaining_bytes;
+                       if (current_dg_in_buffer_size != dg_in_buffer_size)
+                               current_dg_in_buffer_size = dg_in_buffer_size;
+
+                       for (;;) {
+                               ioread8_rep(vmci_dev->iobase +
+                                               VMCI_DATA_IN_ADDR,
+                                       vmci_dev->data_buffer,
+                                       current_dg_in_buffer_size);
+                               if (bytes_to_skip <= current_dg_in_buffer_size)
+                                       break;
+
+                               bytes_to_skip -= current_dg_in_buffer_size;
+                       }
+                       dg = (struct vmci_datagram *)(dg_in_buffer +
+                                                     bytes_to_skip);
+               }
+
+               remaining_bytes =
+                   (size_t) (dg_in_buffer + current_dg_in_buffer_size -
+                             (u8 *)dg);
+
+               if (remaining_bytes < VMCI_DG_HEADERSIZE) {
+                       /* Get the next batch of datagrams. */
+
+                       ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+                                   vmci_dev->data_buffer,
+                                   current_dg_in_buffer_size);
+                       dg = (struct vmci_datagram *)dg_in_buffer;
+                       remaining_bytes = current_dg_in_buffer_size;
+               }
+       }
+}
+
+/*
+ * Scans the notification bitmap for raised flags, clears them
+ * and handles the notifications.
+ */
+static void vmci_process_bitmap(unsigned long data)
+{
+       struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+
+       if (!dev->notification_bitmap) {
+               dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+               return;
+       }
+
+       vmci_dbell_scan_notification_entries(dev->notification_bitmap);
+}
+
+/*
+ * Enable MSI-X.  Try exclusive vectors first, then shared vectors.
+ */
+static int vmci_enable_msix(struct pci_dev *pdev,
+                           struct vmci_guest_device *vmci_dev)
+{
+       int i;
+       int result;
+
+       for (i = 0; i < VMCI_MAX_INTRS; ++i) {
+               vmci_dev->msix_entries[i].entry = i;
+               vmci_dev->msix_entries[i].vector = i;
+       }
+
+       result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+       if (result == 0)
+               vmci_dev->exclusive_vectors = true;
+       else if (result > 0)
+               result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+
+       return result;
+}
+
+/*
+ * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
+ * interrupt (vector VMCI_INTR_DATAGRAM).
+ */
+static irqreturn_t vmci_interrupt(int irq, void *_dev)
+{
+       struct vmci_guest_device *dev = _dev;
+
+       /*
+        * If we are using MSI-X with exclusive vectors then we simply schedule
+        * the datagram tasklet, since we know the interrupt was meant for us.
+        * Otherwise we must read the ICR to determine what to do.
+        */
+
+       if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+               tasklet_schedule(&dev->datagram_tasklet);
+       } else {
+               unsigned int icr;
+
+               /* Acknowledge interrupt and determine what needs doing. */
+               icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+               if (icr == 0 || icr == ~0)
+                       return IRQ_NONE;
+
+               if (icr & VMCI_ICR_DATAGRAM) {
+                       tasklet_schedule(&dev->datagram_tasklet);
+                       icr &= ~VMCI_ICR_DATAGRAM;
+               }
+
+               if (icr & VMCI_ICR_NOTIFICATION) {
+                       tasklet_schedule(&dev->bm_tasklet);
+                       icr &= ~VMCI_ICR_NOTIFICATION;
+               }
+
+               if (icr != 0)
+                       dev_warn(dev->dev,
+                                "Ignoring unknown interrupt cause (%d)\n",
+                                icr);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
+ * which is for the notification bitmap.  Will only get called if we are
+ * using MSI-X with exclusive vectors.
+ */
+static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
+{
+       struct vmci_guest_device *dev = _dev;
+
+       /* For MSI-X we can just assume it was meant for us. */
+       tasklet_schedule(&dev->bm_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Most of the initialization at module load time is done here.
+ */
+static int vmci_guest_probe_device(struct pci_dev *pdev,
+                                  const struct pci_device_id *id)
+{
+       struct vmci_guest_device *vmci_dev;
+       void __iomem *iobase;
+       unsigned int capabilities;
+       unsigned long cmd;
+       int vmci_err;
+       int error;
+
+       dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
+
+       error = pcim_enable_device(pdev);
+       if (error) {
+               dev_err(&pdev->dev,
+                       "Failed to enable VMCI device: %d\n", error);
+               return error;
+       }
+
+       error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+               return error;
+       }
+
+       iobase = pcim_iomap_table(pdev)[0];
+
+       dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
+                (unsigned long)iobase, pdev->irq);
+
+       vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
+       if (!vmci_dev) {
+               dev_err(&pdev->dev,
+                       "Can't allocate memory for VMCI device\n");
+               return -ENOMEM;
+       }
+
+       vmci_dev->dev = &pdev->dev;
+       vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+       vmci_dev->exclusive_vectors = false;
+       vmci_dev->iobase = iobase;
+
+       tasklet_init(&vmci_dev->datagram_tasklet,
+                    vmci_dispatch_dgs, (unsigned long)vmci_dev);
+       tasklet_init(&vmci_dev->bm_tasklet,
+                    vmci_process_bitmap, (unsigned long)vmci_dev);
+
+       vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+       if (!vmci_dev->data_buffer) {
+               dev_err(&pdev->dev,
+                       "Can't allocate memory for datagram buffer\n");
+               return -ENOMEM;
+       }
+
+       pci_set_master(pdev);   /* To enable queue_pair functionality. */
+
+       /*
+        * Verify that the VMCI Device supports the capabilities that
+        * we need. If the device is missing capabilities that we would
+        * like to use, check for fallback capabilities and use those
+        * instead (so we can run a new VM on old hosts). Fail the load if
+        * a required capability is missing and there is no fallback.
+        *
+        * Right now, we need datagrams. There are no fallbacks.
+        */
+       capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+       if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
+               dev_err(&pdev->dev, "Device does not support datagrams\n");
+               error = -ENXIO;
+               goto err_free_data_buffer;
+       }
+
+       /*
+        * If the hardware supports notifications, we will use that as
+        * well.
+        */
+       if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+               vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+               if (!vmci_dev->notification_bitmap) {
+                       dev_warn(&pdev->dev,
+                                "Unable to allocate notification bitmap\n");
+               } else {
+                       memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
+                       capabilities |= VMCI_CAPS_NOTIFICATIONS;
+               }
+       }
+
+       dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+
+       /* Let the host know which capabilities we intend to use. */
+       iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+
+       /* Set up global device so that we can start sending datagrams */
+       spin_lock_irq(&vmci_dev_spinlock);
+       vmci_dev_g = vmci_dev;
+       spin_unlock_irq(&vmci_dev_spinlock);
+
+       /*
+        * Register notification bitmap with device if that capability is
+        * used.
+        */
+       if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+               struct page *page =
+                       vmalloc_to_page(vmci_dev->notification_bitmap);
+               unsigned long bitmap_ppn = page_to_pfn(page);
+               if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
+                       dev_warn(&pdev->dev,
+                                "VMCI device unable to register notification bitmap with PPN 0x%x\n",
+                                (u32) bitmap_ppn);
+                       goto err_remove_vmci_dev_g;
+               }
+       }
+
+       /* Check host capabilities. */
+       if (!vmci_check_host_caps(pdev))
+               goto err_remove_bitmap;
+
+       /* Enable device. */
+
+       /*
+        * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
+        * update the internal context id when needed.
+        */
+       vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
+                                       vmci_guest_cid_update, NULL,
+                                       &ctx_update_sub_id);
+       if (vmci_err < VMCI_SUCCESS)
+               dev_warn(&pdev->dev,
+                        "Failed to subscribe to event (type=%d): %d\n",
+                        VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
+
+       /*
+        * Enable interrupts.  Try MSI-X first, then MSI, and then fallback on
+        * legacy interrupts.
+        */
+       if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
+               vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
+               vmci_dev->irq = vmci_dev->msix_entries[0].vector;
+       } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
+               vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
+               vmci_dev->irq = pdev->irq;
+       } else {
+               vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+               vmci_dev->irq = pdev->irq;
+       }
+
+       /*
+        * Request IRQ for legacy or MSI interrupts, or for first
+        * MSI-X vector.
+        */
+       error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
+                           KBUILD_MODNAME, vmci_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Irq %u in use: %d\n",
+                       vmci_dev->irq, error);
+               goto err_disable_msi;
+       }
+
+       /*
+        * For MSI-X with exclusive vectors we need to request an
+        * interrupt for each vector so that we get a separate
+        * interrupt handler routine.  This allows us to distinguish
+        * between the vectors.
+        */
+       if (vmci_dev->exclusive_vectors) {
+               error = request_irq(vmci_dev->msix_entries[1].vector,
+                                   vmci_interrupt_bm, 0, KBUILD_MODNAME,
+                                   vmci_dev);
+               if (error) {
+                       dev_err(&pdev->dev,
+                               "Failed to allocate irq %u: %d\n",
+                               vmci_dev->msix_entries[1].vector, error);
+                       goto err_free_irq;
+               }
+       }
+
+       dev_dbg(&pdev->dev, "Registered device\n");
+
+       atomic_inc(&vmci_num_guest_devices);
+
+       /* Enable specific interrupt bits. */
+       cmd = VMCI_IMR_DATAGRAM;
+       if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+               cmd |= VMCI_IMR_NOTIFICATION;
+       iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+
+       /* Enable interrupts. */
+       iowrite32(VMCI_CONTROL_INT_ENABLE,
+                 vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+       pci_set_drvdata(pdev, vmci_dev);
+       return 0;
+
+err_free_irq:
+       free_irq(vmci_dev->irq, &vmci_dev);
+       tasklet_kill(&vmci_dev->datagram_tasklet);
+       tasklet_kill(&vmci_dev->bm_tasklet);
+
+err_disable_msi:
+       if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
+               pci_disable_msix(pdev);
+       else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
+               pci_disable_msi(pdev);
+
+       vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+       if (vmci_err < VMCI_SUCCESS)
+               dev_warn(&pdev->dev,
+                        "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+                        VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+err_remove_bitmap:
+       if (vmci_dev->notification_bitmap) {
+               iowrite32(VMCI_CONTROL_RESET,
+                         vmci_dev->iobase + VMCI_CONTROL_ADDR);
+               vfree(vmci_dev->notification_bitmap);
+       }
+
+err_remove_vmci_dev_g:
+       spin_lock_irq(&vmci_dev_spinlock);
+       vmci_dev_g = NULL;
+       spin_unlock_irq(&vmci_dev_spinlock);
+
+err_free_data_buffer:
+       vfree(vmci_dev->data_buffer);
+
+       /* The rest are managed resources and will be freed by PCI core */
+       return error;
+}
+
+static void vmci_guest_remove_device(struct pci_dev *pdev)
+{
+       struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
+       int vmci_err;
+
+       dev_dbg(&pdev->dev, "Removing device\n");
+
+       atomic_dec(&vmci_num_guest_devices);
+
+       vmci_qp_guest_endpoints_exit();
+
+       vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+       if (vmci_err < VMCI_SUCCESS)
+               dev_warn(&pdev->dev,
+                        "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+                        VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+       spin_lock_irq(&vmci_dev_spinlock);
+       vmci_dev_g = NULL;
+       spin_unlock_irq(&vmci_dev_spinlock);
+
+       dev_dbg(&pdev->dev, "Resetting vmci device\n");
+       iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+       /*
+        * Free IRQ and then disable MSI/MSI-X as appropriate.  For
+        * MSI-X, we might have multiple vectors, each with their own
+        * IRQ, which we must free too.
+        */
+       free_irq(vmci_dev->irq, vmci_dev);
+       if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
+               if (vmci_dev->exclusive_vectors)
+                       free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
+               pci_disable_msix(pdev);
+       } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
+               pci_disable_msi(pdev);
+       }
+
+       tasklet_kill(&vmci_dev->datagram_tasklet);
+       tasklet_kill(&vmci_dev->bm_tasklet);
+
+       if (vmci_dev->notification_bitmap) {
+               /*
+                * The device reset above cleared the bitmap state of the
+                * device, so we can safely free it here.
+                */
+
+               vfree(vmci_dev->notification_bitmap);
+       }
+
+       vfree(vmci_dev->data_buffer);
+
+       /* The rest are managed resources and will be freed by PCI core */
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
+       { 0 },
+};
+MODULE_DEVICE_TABLE(pci, vmci_ids);
+
+static struct pci_driver vmci_guest_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = vmci_ids,
+       .probe          = vmci_guest_probe_device,
+       .remove         = vmci_guest_remove_device,
+};
+
+int __init vmci_guest_init(void)
+{
+       return pci_register_driver(&vmci_guest_driver);
+}
+
+void __exit vmci_guest_exit(void)
+{
+       pci_unregister_driver(&vmci_guest_driver);
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
new file mode 100644 (file)
index 0000000..344973a
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/slab.h>
+#include "vmci_handle_array.h"
+
+static size_t handle_arr_calc_size(size_t capacity)
+{
+       return sizeof(struct vmci_handle_arr) +
+           capacity * sizeof(struct vmci_handle);
+}
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+{
+       struct vmci_handle_arr *array;
+
+       if (capacity == 0)
+               capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+
+       array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+       if (!array)
+               return NULL;
+
+       array->capacity = capacity;
+       array->size = 0;
+
+       return array;
+}
+
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+{
+       kfree(array);
+}
+
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+                                 struct vmci_handle handle)
+{
+       struct vmci_handle_arr *array = *array_ptr;
+
+       if (unlikely(array->size >= array->capacity)) {
+               /* reallocate. */
+               struct vmci_handle_arr *new_array;
+               size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+               size_t new_size = handle_arr_calc_size(new_capacity);
+
+               new_array = krealloc(array, new_size, GFP_ATOMIC);
+               if (!new_array)
+                       return;
+
+               new_array->capacity = new_capacity;
+               *array_ptr = array = new_array;
+       }
+
+       array->entries[array->size] = handle;
+       array->size++;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
+ */
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+                                               struct vmci_handle entry_handle)
+{
+       struct vmci_handle handle = VMCI_INVALID_HANDLE;
+       size_t i;
+
+       for (i = 0; i < array->size; i++) {
+               if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+                       handle = array->entries[i];
+                       array->size--;
+                       array->entries[i] = array->entries[array->size];
+                       array->entries[array->size] = VMCI_INVALID_HANDLE;
+                       break;
+               }
+       }
+
+       return handle;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
+ */
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+{
+       struct vmci_handle handle = VMCI_INVALID_HANDLE;
+
+       if (array->size) {
+               array->size--;
+               handle = array->entries[array->size];
+               array->entries[array->size] = VMCI_INVALID_HANDLE;
+       }
+
+       return handle;
+}
+
+/*
+ * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+ */
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+{
+       if (unlikely(index >= array->size))
+               return VMCI_INVALID_HANDLE;
+
+       return array->entries[index];
+}
+
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+                              struct vmci_handle entry_handle)
+{
+       size_t i;
+
+       for (i = 0; i < array->size; i++)
+               if (vmci_handle_is_equal(array->entries[i], entry_handle))
+                       return true;
+
+       return false;
+}
+
+/*
+ * NULL if the array is empty. Otherwise, a pointer to the array
+ * of VMCI handles in the handle array.
+ */
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
+{
+       if (array->size)
+               return array->entries;
+
+       return NULL;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
new file mode 100644 (file)
index 0000000..b5f3a7f
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_HANDLE_ARRAY_H_
+#define _VMCI_HANDLE_ARRAY_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+#define VMCI_ARR_CAP_MULT 2    /* Array capacity multiplier */
+
+struct vmci_handle_arr {
+       size_t capacity;
+       size_t size;
+       struct vmci_handle entries[];
+};
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+                                 struct vmci_handle handle);
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+                                               struct vmci_handle
+                                               entry_handle);
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+                              struct vmci_handle entry_handle);
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+static inline size_t vmci_handle_arr_get_size(
+       const struct vmci_handle_arr *array)
+{
+       return array->size;
+}
+
+
+#endif /* _VMCI_HANDLE_ARRAY_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
new file mode 100644 (file)
index 0000000..d4722b3
--- /dev/null
@@ -0,0 +1,1043 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+enum {
+       VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
+       VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
+};
+
+enum {
+       VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
+       VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
+       VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
+};
+
+/*
+ * VMCI driver initialization. This block can also be used to
+ * pass initial group membership etc.
+ */
+struct vmci_init_blk {
+       u32 cid;
+       u32 flags;
+};
+
+/* VMCIqueue_pairAllocInfo_VMToVM */
+struct vmci_qp_alloc_info_vmvm {
+       struct vmci_handle handle;
+       u32 peer;
+       u32 flags;
+       u64 produce_size;
+       u64 consume_size;
+       u64 produce_page_file;    /* User VA. */
+       u64 consume_page_file;    /* User VA. */
+       u64 produce_page_file_size;  /* Size of the file name array. */
+       u64 consume_page_file_size;  /* Size of the file name array. */
+       s32 result;
+       u32 _pad;
+};
+
+/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
+struct vmci_set_notify_info {
+       u64 notify_uva;
+       s32 result;
+       u32 _pad;
+};
+
+/*
+ * Per-instance host state
+ */
+struct vmci_host_dev {
+       struct vmci_ctx *context;
+       int user_version;
+       enum vmci_obj_type ct_type;
+       struct mutex lock;  /* Mutex lock for vmci context access */
+};
+
+static struct vmci_ctx *host_context;
+static bool vmci_host_device_initialized;
+static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
+
+/*
+ * Determines whether the VMCI host personality is
+ * available. Since the core functionality of the host driver is
+ * always present, all guests could possibly use the host
+ * personality. However, to minimize the deviation from the
+ * pre-unified driver state of affairs, we only consider the host
+ * device active if there is no active guest device or if there
+ * are VMX'en with active VMCI contexts using the host device.
+ */
+bool vmci_host_code_active(void)
+{
+       return vmci_host_device_initialized &&
+           (!vmci_guest_code_active() ||
+            atomic_read(&vmci_host_active_users) > 0);
+}
+
+/*
+ * Called on open of /dev/vmci.
+ */
+static int vmci_host_open(struct inode *inode, struct file *filp)
+{
+       struct vmci_host_dev *vmci_host_dev;
+
+       vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
+       if (vmci_host_dev == NULL)
+               return -ENOMEM;
+
+       vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+       mutex_init(&vmci_host_dev->lock);
+       filp->private_data = vmci_host_dev;
+
+       return 0;
+}
+
+/*
+ * Called on close of /dev/vmci, most often when the process
+ * exits.
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+       struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+       if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+               vmci_ctx_destroy(vmci_host_dev->context);
+               vmci_host_dev->context = NULL;
+
+               /*
+                * The number of active contexts is used to track whether any
+                * VMX'en are using the host personality. It is incremented when
+                * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+                * ioctl.
+                */
+               atomic_dec(&vmci_host_active_users);
+       }
+       vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+       kfree(vmci_host_dev);
+       filp->private_data = NULL;
+       return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+{
+       struct vmci_host_dev *vmci_host_dev = filp->private_data;
+       struct vmci_ctx *context = vmci_host_dev->context;
+       unsigned int mask = 0;
+
+       if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+               /* Check for VMCI calls to this VM context. */
+               if (wait)
+                       poll_wait(filp, &context->host_context.wait_queue,
+                                 wait);
+
+               spin_lock(&context->lock);
+               if (context->pending_datagrams > 0 ||
+                   vmci_handle_arr_get_size(
+                               context->pending_doorbell_array) > 0) {
+                       mask = POLLIN;
+               }
+               spin_unlock(&context->lock);
+       }
+       return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in userBufferSize. If the copy to the
+ * user buffer fails, the functions still returns VMCI_SUCCESS,
+ * but retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+                                u64 *user_buf_size,
+                                struct vmci_handle_arr *handle_array,
+                                int *retval)
+{
+       u32 array_size = 0;
+       struct vmci_handle *handles;
+
+       if (handle_array)
+               array_size = vmci_handle_arr_get_size(handle_array);
+
+       if (array_size * sizeof(*handles) > *user_buf_size)
+               return VMCI_ERROR_MORE_DATA;
+
+       *user_buf_size = array_size * sizeof(*handles);
+       if (*user_buf_size)
+               *retval = copy_to_user(user_buf_uva,
+                                      vmci_handle_arr_get_handles
+                                      (handle_array), *user_buf_size);
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work.  Calls drv_map_bool_ptr()
+ * which maps the notify boolean in user VA in kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+                                 unsigned long uva)
+{
+       struct page *page;
+       int retval;
+
+       if (context->notify_page) {
+               pr_devel("%s: Notify mechanism is already set up\n", __func__);
+               return VMCI_ERROR_DUPLICATE_ENTRY;
+       }
+
+       /*
+        * We are using 'bool' internally, but let's make sure we explicit
+        * about the size.
+        */
+       BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+       if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
+               return VMCI_ERROR_GENERIC;
+
+       /*
+        * Lock physical page backing a given user VA.
+        */
+       down_read(&current->mm->mmap_sem);
+       retval = get_user_pages(current, current->mm,
+                               PAGE_ALIGN(uva),
+                               1, 1, 0, &page, NULL);
+       up_read(&current->mm->mmap_sem);
+       if (retval != 1)
+               return VMCI_ERROR_GENERIC;
+
+       /*
+        * Map the locked page and set up notify pointer.
+        */
+       context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+       vmci_ctx_check_signal_notify(context);
+
+       return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+                                unsigned int cmd, void __user *uptr)
+{
+       if (cmd == IOCTL_VMCI_VERSION2) {
+               int __user *vptr = uptr;
+               if (get_user(vmci_host_dev->user_version, vptr))
+                       return -EFAULT;
+       }
+
+       /*
+        * The basic logic here is:
+        *
+        * If the user sends in a version of 0 tell it our version.
+        * If the user didn't send in a version, tell it our version.
+        * If the user sent in an old version, tell it -its- version.
+        * If the user sent in an newer version, tell it our version.
+        *
+        * The rationale behind telling the caller its version is that
+        * Workstation 6.5 required that VMX and VMCI kernel module were
+        * version sync'd.  All new VMX users will be programmed to
+        * handle the VMCI kernel module version.
+        */
+
+       if (vmci_host_dev->user_version > 0 &&
+           vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+               return vmci_host_dev->user_version;
+       }
+
+       return VMCI_VERSION;
+}
+
+#define vmci_ioctl_err(fmt, ...)       \
+       pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+                                    const char *ioctl_name,
+                                    void __user *uptr)
+{
+       struct vmci_init_blk init_block;
+       const struct cred *cred;
+       int retval;
+
+       if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+               vmci_ioctl_err("error reading init block\n");
+               return -EFAULT;
+       }
+
+       mutex_lock(&vmci_host_dev->lock);
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+               vmci_ioctl_err("received VMCI init on initialized handle\n");
+               retval = -EINVAL;
+               goto out;
+       }
+
+       if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+               vmci_ioctl_err("unsupported VMCI restriction flag\n");
+               retval = -EINVAL;
+               goto out;
+       }
+
+       cred = get_current_cred();
+       vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+                                                init_block.flags, 0,
+                                                vmci_host_dev->user_version,
+                                                cred);
+       put_cred(cred);
+       if (IS_ERR(vmci_host_dev->context)) {
+               retval = PTR_ERR(vmci_host_dev->context);
+               vmci_ioctl_err("error initializing context\n");
+               goto out;
+       }
+
+       /*
+        * Copy cid to userlevel, we do this to allow the VMX
+        * to enforce its policy on cid generation.
+        */
+       init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
+       if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
+               vmci_ctx_destroy(vmci_host_dev->context);
+               vmci_host_dev->context = NULL;
+               vmci_ioctl_err("error writing init block\n");
+               retval = -EFAULT;
+               goto out;
+       }
+
+       vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
+       atomic_inc(&vmci_host_active_users);
+
+       retval = 0;
+
+out:
+       mutex_unlock(&vmci_host_dev->lock);
+       return retval;
+}
+
+static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
+                                     const char *ioctl_name,
+                                     void __user *uptr)
+{
+       struct vmci_datagram_snd_rcv_info send_info;
+       struct vmci_datagram *dg = NULL;
+       u32 cid;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&send_info, uptr, sizeof(send_info)))
+               return -EFAULT;
+
+       if (send_info.len > VMCI_MAX_DG_SIZE) {
+               vmci_ioctl_err("datagram is too big (size=%d)\n",
+                              send_info.len);
+               return -EINVAL;
+       }
+
+       if (send_info.len < sizeof(*dg)) {
+               vmci_ioctl_err("datagram is too small (size=%d)\n",
+                              send_info.len);
+               return -EINVAL;
+       }
+
+       dg = kmalloc(send_info.len, GFP_KERNEL);
+       if (!dg) {
+               vmci_ioctl_err(
+                       "cannot allocate memory to dispatch datagram\n");
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
+                          send_info.len)) {
+               vmci_ioctl_err("error getting datagram\n");
+               kfree(dg);
+               return -EFAULT;
+       }
+
+       pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
+                dg->dst.context, dg->dst.resource,
+                dg->src.context, dg->src.resource,
+                (unsigned long long)dg->payload_size);
+
+       /* Get source context id. */
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+       send_info.result = vmci_datagram_dispatch(cid, dg, true);
+       kfree(dg);
+
+       return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
+                                        const char *ioctl_name,
+                                        void __user *uptr)
+{
+       struct vmci_datagram_snd_rcv_info recv_info;
+       struct vmci_datagram *dg = NULL;
+       int retval;
+       size_t size;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
+               return -EFAULT;
+
+       size = recv_info.len;
+       recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
+                                                    &size, &dg);
+
+       if (recv_info.result >= VMCI_SUCCESS) {
+               void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
+               retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
+               kfree(dg);
+               if (retval != 0)
+                       return -EFAULT;
+       }
+
+       return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
+                                       const char *ioctl_name,
+                                       void __user *uptr)
+{
+       struct vmci_handle handle;
+       int vmci_status;
+       int __user *retptr;
+       u32 cid;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+       if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+               struct vmci_qp_alloc_info_vmvm alloc_info;
+               struct vmci_qp_alloc_info_vmvm __user *info = uptr;
+
+               if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+                       return -EFAULT;
+
+               handle = alloc_info.handle;
+               retptr = &info->result;
+
+               vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+                                               alloc_info.peer,
+                                               alloc_info.flags,
+                                               VMCI_NO_PRIVILEGE_FLAGS,
+                                               alloc_info.produce_size,
+                                               alloc_info.consume_size,
+                                               NULL,
+                                               vmci_host_dev->context);
+
+               if (vmci_status == VMCI_SUCCESS)
+                       vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
+       } else {
+               struct vmci_qp_alloc_info alloc_info;
+               struct vmci_qp_alloc_info __user *info = uptr;
+               struct vmci_qp_page_store page_store;
+
+               if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+                       return -EFAULT;
+
+               handle = alloc_info.handle;
+               retptr = &info->result;
+
+               page_store.pages = alloc_info.ppn_va;
+               page_store.len = alloc_info.num_ppns;
+
+               vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+                                               alloc_info.peer,
+                                               alloc_info.flags,
+                                               VMCI_NO_PRIVILEGE_FLAGS,
+                                               alloc_info.produce_size,
+                                               alloc_info.consume_size,
+                                               &page_store,
+                                               vmci_host_dev->context);
+       }
+
+       if (put_user(vmci_status, retptr)) {
+               if (vmci_status >= VMCI_SUCCESS) {
+                       vmci_status = vmci_qp_broker_detach(handle,
+                                                       vmci_host_dev->context);
+               }
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
+                                       const char *ioctl_name,
+                                       void __user *uptr)
+{
+       struct vmci_qp_set_va_info set_va_info;
+       struct vmci_qp_set_va_info __user *info = uptr;
+       s32 result;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+               vmci_ioctl_err("is not allowed\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
+               return -EFAULT;
+
+       if (set_va_info.va) {
+               /*
+                * VMX is passing down a new VA for the queue
+                * pair mapping.
+                */
+               result = vmci_qp_broker_map(set_va_info.handle,
+                                           vmci_host_dev->context,
+                                           set_va_info.va);
+       } else {
+               /*
+                * The queue pair is about to be unmapped by
+                * the VMX.
+                */
+               result = vmci_qp_broker_unmap(set_va_info.handle,
+                                        vmci_host_dev->context, 0);
+       }
+
+       return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
+                                       const char *ioctl_name,
+                                       void __user *uptr)
+{
+       struct vmci_qp_page_file_info page_file_info;
+       struct vmci_qp_page_file_info __user *info = uptr;
+       s32 result;
+
+       if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
+           vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
+               vmci_ioctl_err("not supported on this VMX (version=%d)\n",
+                              vmci_host_dev->user_version);
+               return -EINVAL;
+       }
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
+               return -EFAULT;
+
+       /*
+        * Communicate success pre-emptively to the caller.  Note that the
+        * basic premise is that it is incumbent upon the caller not to look at
+        * the info.result field until after the ioctl() returns.  And then,
+        * only if the ioctl() result indicates no error.  We send up the
+        * SUCCESS status before calling SetPageStore() store because failing
+        * to copy up the result code means unwinding the SetPageStore().
+        *
+        * It turns out the logic to unwind a SetPageStore() opens a can of
+        * worms.  For example, if a host had created the queue_pair and a
+        * guest attaches and SetPageStore() is successful but writing success
+        * fails, then ... the host has to be stopped from writing (anymore)
+        * data into the queue_pair.  That means an additional test in the
+        * VMCI_Enqueue() code path.  Ugh.
+        */
+
+       if (put_user(VMCI_SUCCESS, &info->result)) {
+               /*
+                * In this case, we can't write a result field of the
+                * caller's info block.  So, we don't even try to
+                * SetPageStore().
+                */
+               return -EFAULT;
+       }
+
+       result = vmci_qp_broker_set_page_store(page_file_info.handle,
+                                               page_file_info.produce_va,
+                                               page_file_info.consume_va,
+                                               vmci_host_dev->context);
+       if (result < VMCI_SUCCESS) {
+               if (put_user(result, &info->result)) {
+                       /*
+                        * Note that in this case the SetPageStore()
+                        * call failed but we were unable to
+                        * communicate that to the caller (because the
+                        * copy_to_user() call failed).  So, if we
+                        * simply return an error (in this case
+                        * -EFAULT) then the caller will know that the
+                        *  SetPageStore failed even though we couldn't
+                        *  put the result code in the result field and
+                        *  indicate exactly why it failed.
+                        *
+                        * That says nothing about the issue where we
+                        * were once able to write to the caller's info
+                        * memory and now can't.  Something more
+                        * serious is probably going on than the fact
+                        * that SetPageStore() didn't work.
+                        */
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+                                 const char *ioctl_name,
+                                 void __user *uptr)
+{
+       struct vmci_qp_dtch_info detach_info;
+       struct vmci_qp_dtch_info __user *info = uptr;
+       s32 result;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+               return -EFAULT;
+
+       result = vmci_qp_broker_detach(detach_info.handle,
+                                      vmci_host_dev->context);
+       if (result == VMCI_SUCCESS &&
+           vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+               result = VMCI_SUCCESS_LAST_DETACH;
+       }
+
+       return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+                                      const char *ioctl_name,
+                                      void __user *uptr)
+{
+       struct vmci_ctx_info ar_info;
+       struct vmci_ctx_info __user *info = uptr;
+       s32 result;
+       u32 cid;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+               return -EFAULT;
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+       result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+       return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
+                                         const char *ioctl_name,
+                                         void __user *uptr)
+{
+       struct vmci_ctx_info ar_info;
+       struct vmci_ctx_info __user *info = uptr;
+       u32 cid;
+       int result;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+               return -EFAULT;
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+       result = vmci_ctx_remove_notification(cid,
+                                             ar_info.remote_cid);
+
+       return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
+                                         const char *ioctl_name,
+                                         void __user *uptr)
+{
+       struct vmci_ctx_chkpt_buf_info get_info;
+       u32 cid;
+       void *cpt_buf;
+       int retval;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&get_info, uptr, sizeof(get_info)))
+               return -EFAULT;
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+       get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
+                                               &get_info.buf_size, &cpt_buf);
+       if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
+               void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
+               retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
+               kfree(cpt_buf);
+
+               if (retval)
+                       return -EFAULT;
+       }
+
+       return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
+                                         const char *ioctl_name,
+                                         void __user *uptr)
+{
+       struct vmci_ctx_chkpt_buf_info set_info;
+       u32 cid;
+       void *cpt_buf;
+       int retval;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&set_info, uptr, sizeof(set_info)))
+               return -EFAULT;
+
+       cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
+       if (!cpt_buf) {
+               vmci_ioctl_err(
+                       "cannot allocate memory to set cpt state (type=%d)\n",
+                       set_info.cpt_type);
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
+                          set_info.buf_size)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+       set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
+                                                  set_info.buf_size, cpt_buf);
+
+       retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
+
+out:
+       kfree(cpt_buf);
+       return retval;
+}
+
+static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
+                                      const char *ioctl_name,
+                                      void __user *uptr)
+{
+       u32 __user *u32ptr = uptr;
+
+       return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
+                                  const char *ioctl_name,
+                                  void __user *uptr)
+{
+       struct vmci_set_notify_info notify_info;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
+               return -EFAULT;
+
+       if (notify_info.notify_uva) {
+               notify_info.result =
+                       vmci_host_setup_notify(vmci_host_dev->context,
+                                              notify_info.notify_uva);
+       } else {
+               vmci_ctx_unset_notify(vmci_host_dev->context);
+               notify_info.result = VMCI_SUCCESS;
+       }
+
+       return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
+               -EFAULT : 0;
+}
+
+static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
+                                       const char *ioctl_name,
+                                       void __user *uptr)
+{
+       struct vmci_dbell_notify_resource_info info;
+       u32 cid;
+
+       if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+               vmci_ioctl_err("invalid for current VMX versions\n");
+               return -EINVAL;
+       }
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&info, uptr, sizeof(info)))
+               return -EFAULT;
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+       switch (info.action) {
+       case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
+               if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
+                       u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
+                       info.result = vmci_ctx_notify_dbell(cid, info.handle,
+                                                           flags);
+               } else {
+                       info.result = VMCI_ERROR_UNAVAILABLE;
+               }
+               break;
+
+       case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
+               info.result = vmci_ctx_dbell_create(cid, info.handle);
+               break;
+
+       case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
+               info.result = vmci_ctx_dbell_destroy(cid, info.handle);
+               break;
+
+       default:
+               vmci_ioctl_err("got unknown action (action=%d)\n",
+                              info.action);
+               info.result = VMCI_ERROR_INVALID_ARGS;
+       }
+
+       return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
+                                          const char *ioctl_name,
+                                          void __user *uptr)
+{
+       struct vmci_ctx_notify_recv_info info;
+       struct vmci_handle_arr *db_handle_array;
+       struct vmci_handle_arr *qp_handle_array;
+       void __user *ubuf;
+       u32 cid;
+       int retval = 0;
+
+       if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+               vmci_ioctl_err("only valid for contexts\n");
+               return -EINVAL;
+       }
+
+       if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+               vmci_ioctl_err("not supported for the current vmx version\n");
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&info, uptr, sizeof(info)))
+               return -EFAULT;
+
+       if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
+           (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
+               return -EINVAL;
+       }
+
+       cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+       info.result = vmci_ctx_rcv_notifications_get(cid,
+                               &db_handle_array, &qp_handle_array);
+       if (info.result != VMCI_SUCCESS)
+               return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+
+       ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
+       info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
+                                           db_handle_array, &retval);
+       if (info.result == VMCI_SUCCESS && !retval) {
+               ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
+               info.result = drv_cp_harray_to_user(ubuf,
+                                                   &info.qp_handle_buf_size,
+                                                   qp_handle_array, &retval);
+       }
+
+       if (!retval && copy_to_user(uptr, &info, sizeof(info)))
+               retval = -EFAULT;
+
+       vmci_ctx_rcv_notifications_release(cid,
+                               db_handle_array, qp_handle_array,
+                               info.result == VMCI_SUCCESS && !retval);
+
+       return retval;
+}
+
+static long vmci_host_unlocked_ioctl(struct file *filp,
+                                    unsigned int iocmd, unsigned long ioarg)
+{
+#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {                       \
+               char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);    \
+               return vmci_host_do_ ## ioctl_fn(                       \
+                       vmci_host_dev, name, uptr);                     \
+       } while (0)
+
+       struct vmci_host_dev *vmci_host_dev = filp->private_data;
+       void __user *uptr = (void __user *)ioarg;
+
+       switch (iocmd) {
+       case IOCTL_VMCI_INIT_CONTEXT:
+               VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
+       case IOCTL_VMCI_DATAGRAM_SEND:
+               VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
+       case IOCTL_VMCI_DATAGRAM_RECEIVE:
+               VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
+       case IOCTL_VMCI_QUEUEPAIR_ALLOC:
+               VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
+       case IOCTL_VMCI_QUEUEPAIR_SETVA:
+               VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
+       case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
+               VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
+       case IOCTL_VMCI_QUEUEPAIR_DETACH:
+               VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
+       case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
+               VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
+       case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
+               VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
+       case IOCTL_VMCI_CTX_GET_CPT_STATE:
+               VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
+       case IOCTL_VMCI_CTX_SET_CPT_STATE:
+               VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
+       case IOCTL_VMCI_GET_CONTEXT_ID:
+               VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
+       case IOCTL_VMCI_SET_NOTIFY:
+               VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
+       case IOCTL_VMCI_NOTIFY_RESOURCE:
+               VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
+       case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
+               VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
+
+       case IOCTL_VMCI_VERSION:
+       case IOCTL_VMCI_VERSION2:
+               return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
+
+       default:
+               pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
+               return -EINVAL;
+       }
+
+#undef VMCI_DO_IOCTL
+}
+
+static const struct file_operations vmuser_fops = {
+       .owner          = THIS_MODULE,
+       .open           = vmci_host_open,
+       .release        = vmci_host_close,
+       .poll           = vmci_host_poll,
+       .unlocked_ioctl = vmci_host_unlocked_ioctl,
+       .compat_ioctl   = vmci_host_unlocked_ioctl,
+};
+
+static struct miscdevice vmci_host_miscdev = {
+        .name = "vmci",
+        .minor = MISC_DYNAMIC_MINOR,
+        .fops = &vmuser_fops,
+};
+
+int __init vmci_host_init(void)
+{
+       int error;
+
+       host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
+                                       VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+                                       -1, VMCI_VERSION, NULL);
+       if (IS_ERR(host_context)) {
+               error = PTR_ERR(host_context);
+               pr_warn("Failed to initialize VMCIContext (error%d)\n",
+                       error);
+               return error;
+       }
+
+       error = misc_register(&vmci_host_miscdev);
+       if (error) {
+               pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
+                       vmci_host_miscdev.name,
+                       MISC_MAJOR, vmci_host_miscdev.minor,
+                       error);
+               pr_warn("Unable to initialize host personality\n");
+               vmci_ctx_destroy(host_context);
+               return error;
+       }
+
+       pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
+               vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
+
+       vmci_host_device_initialized = true;
+       return 0;
+}
+
+void __exit vmci_host_exit(void)
+{
+       int error;
+
+       vmci_host_device_initialized = false;
+
+       error = misc_deregister(&vmci_host_miscdev);
+       if (error)
+               pr_warn("Error unregistering character device: %d\n", error);
+
+       vmci_ctx_destroy(host_context);
+       vmci_qp_broker_exit();
+
+       pr_debug("VMCI host driver module unloaded\n");
+}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644 (file)
index 0000000..d94245d
--- /dev/null
@@ -0,0 +1,3425 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * In the following, we will distinguish between two kinds of VMX processes -
+ * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
+ * VMCI page files in the VMX and supporting VM to VM communication and the
+ * newer ones that use the guest memory directly. We will in the following
+ * refer to the older VMX versions as old-style VMX'en, and the newer ones as
+ * new-style VMX'en.
+ *
+ * The state transition datagram is as follows (the VMCIQPB_ prefix has been
+ * removed for readability) - see below for more details on the transtions:
+ *
+ *            --------------  NEW  -------------
+ *            |                                |
+ *           \_/                              \_/
+ *     CREATED_NO_MEM <-----------------> CREATED_MEM
+ *            |    |                           |
+ *            |    o-----------------------o   |
+ *            |                            |   |
+ *           \_/                          \_/ \_/
+ *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
+ *            |                            |   |
+ *            |     o----------------------o   |
+ *            |     |                          |
+ *           \_/   \_/                        \_/
+ *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
+ *            |                                |
+ *            |                                |
+ *            -------------> gone <-------------
+ *
+ * In more detail. When a VMCI queue pair is first created, it will be in the
+ * VMCIQPB_NEW state. It will then move into one of the following states:
+ *
+ * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
+ *
+ *     - the created was performed by a host endpoint, in which case there is
+ *       no backing memory yet.
+ *
+ *     - the create was initiated by an old-style VMX, that uses
+ *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
+ *       a later point in time. This state can be distinguished from the one
+ *       above by the context ID of the creator. A host side is not allowed to
+ *       attach until the page store has been set.
+ *
+ * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
+ *     is created by a VMX using the queue pair device backend that
+ *     sets the UVAs of the queue pair immediately and stores the
+ *     information for later attachers. At this point, it is ready for
+ *     the host side to attach to it.
+ *
+ * Once the queue pair is in one of the created states (with the exception of
+ * the case mentioned for older VMX'en above), it is possible to attach to the
+ * queue pair. Again we have two new states possible:
+ *
+ * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
+ *   paths:
+ *
+ *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
+ *       pair, and attaches to a queue pair previously created by the host side.
+ *
+ *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
+ *       already created by a guest.
+ *
+ *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
+ *       vmci_qp_broker_set_page_store (see below).
+ *
+ * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
+ *     VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
+ *     bring the queue pair into this state. Once vmci_qp_broker_set_page_store
+ *     is called to register the user memory, the VMCIQPB_ATTACH_MEM state
+ *     will be entered.
+ *
+ * From the attached queue pair, the queue pair can enter the shutdown states
+ * when either side of the queue pair detaches. If the guest side detaches
+ * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
+ * the content of the queue pair will no longer be available. If the host
+ * side detaches first, the queue pair will either enter the
+ * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
+ * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
+ * (e.g., the host detaches while a guest is stunned).
+ *
+ * New-style VMX'en will also unmap guest memory, if the guest is
+ * quiesced, e.g., during a snapshot operation. In that case, the guest
+ * memory will no longer be available, and the queue pair will transition from
+ * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
+ * in which case the queue pair will transition from the *_NO_MEM state at that
+ * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
+ * since the peer may have either attached or detached in the meantime. The
+ * values are laid out such that ++ on a state will move from a *_NO_MEM to a
+ * *_MEM state, and vice versa.
+ */
+
+/*
+ * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
+ * types are passed around to enqueue and dequeue routines.  Note that
+ * often the functions passed are simply wrappers around memcpy
+ * itself.
+ *
+ * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
+ * there's an unused last parameter for the hosted side.  In
+ * ESX, that parameter holds a buffer type.
+ */
+typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
+                                     u64 queue_offset, const void *src,
+                                     size_t src_offset, size_t size);
+typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
+                                       const struct vmci_queue *queue,
+                                       u64 queue_offset, size_t size);
+
+/* The Kernel specific component of the struct vmci_queue structure. */
+struct vmci_queue_kern_if {
+       struct page **page;
+       struct page **header_page;
+       void *va;
+       struct mutex __mutex;   /* Protects the queue. */
+       struct mutex *mutex;    /* Shared by producer and consumer queues. */
+       bool host;
+       size_t num_pages;
+       bool mapped;
+};
+
+/*
+ * This structure is opaque to the clients.
+ */
+struct vmci_qp {
+       struct vmci_handle handle;
+       struct vmci_queue *produce_q;
+       struct vmci_queue *consume_q;
+       u64 produce_q_size;
+       u64 consume_q_size;
+       u32 peer;
+       u32 flags;
+       u32 priv_flags;
+       bool guest_endpoint;
+       unsigned int blocked;
+       unsigned int generation;
+       wait_queue_head_t event;
+};
+
+enum qp_broker_state {
+       VMCIQPB_NEW,
+       VMCIQPB_CREATED_NO_MEM,
+       VMCIQPB_CREATED_MEM,
+       VMCIQPB_ATTACHED_NO_MEM,
+       VMCIQPB_ATTACHED_MEM,
+       VMCIQPB_SHUTDOWN_NO_MEM,
+       VMCIQPB_SHUTDOWN_MEM,
+       VMCIQPB_GONE
+};
+
+#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
+                                    _qpb->state == VMCIQPB_ATTACHED_MEM || \
+                                    _qpb->state == VMCIQPB_SHUTDOWN_MEM)
+
+/*
+ * In the queue pair broker, we always use the guest point of view for
+ * the produce and consume queue values and references, e.g., the
+ * produce queue size stored is the guests produce queue size. The
+ * host endpoint will need to swap these around. The only exception is
+ * the local queue pairs on the host, in which case the host endpoint
+ * that creates the queue pair will have the right orientation, and
+ * the attaching host endpoint will need to swap.
+ */
+struct qp_entry {
+       struct list_head list_item;
+       struct vmci_handle handle;
+       u32 peer;
+       u32 flags;
+       u64 produce_size;
+       u64 consume_size;
+       u32 ref_count;
+};
+
+struct qp_broker_entry {
+       struct vmci_resource resource;
+       struct qp_entry qp;
+       u32 create_id;
+       u32 attach_id;
+       enum qp_broker_state state;
+       bool require_trusted_attach;
+       bool created_by_trusted;
+       bool vmci_page_files;   /* Created by VMX using VMCI page files */
+       struct vmci_queue *produce_q;
+       struct vmci_queue *consume_q;
+       struct vmci_queue_header saved_produce_q;
+       struct vmci_queue_header saved_consume_q;
+       vmci_event_release_cb wakeup_cb;
+       void *client_data;
+       void *local_mem;        /* Kernel memory for local queue pair */
+};
+
+struct qp_guest_endpoint {
+       struct vmci_resource resource;
+       struct qp_entry qp;
+       u64 num_ppns;
+       void *produce_q;
+       void *consume_q;
+       struct ppn_set ppn_set;
+};
+
+struct qp_list {
+       struct list_head head;
+       struct mutex mutex;     /* Protect queue list. */
+};
+
+static struct qp_list qp_broker_list = {
+       .head = LIST_HEAD_INIT(qp_broker_list.head),
+       .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
+};
+
+static struct qp_list qp_guest_endpoints = {
+       .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
+       .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
+};
+
+#define INVALID_VMCI_GUEST_MEM_ID  0
+#define QPE_NUM_PAGES(_QPE) ((u32) \
+                            (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
+                             DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
+
+
+/*
+ * Frees kernel VA space for a given queue and its queue header, and
+ * frees physical data pages.
+ */
+static void qp_free_queue(void *q, u64 size)
+{
+       struct vmci_queue *queue = q;
+
+       if (queue) {
+               u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+
+               if (queue->kernel_if->mapped) {
+                       vunmap(queue->kernel_if->va);
+                       queue->kernel_if->va = NULL;
+               }
+
+               while (i)
+                       __free_page(queue->kernel_if->page[--i]);
+
+               vfree(queue->q_header);
+       }
+}
+
+/*
+ * Allocates kernel VA space of specified size, plus space for the
+ * queue structure/kernel interface and the queue header.  Allocates
+ * physical pages for the queue data pages.
+ *
+ * PAGE m:      struct vmci_queue_header (struct vmci_queue->q_header)
+ * PAGE m+1:    struct vmci_queue
+ * PAGE m+1+q:  struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
+ * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ */
+static void *qp_alloc_queue(u64 size, u32 flags)
+{
+       u64 i;
+       struct vmci_queue *queue;
+       struct vmci_queue_header *q_header;
+       const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+       const uint queue_size =
+           PAGE_SIZE +
+           sizeof(*queue) + sizeof(*(queue->kernel_if)) +
+           num_data_pages * sizeof(*(queue->kernel_if->page));
+
+       q_header = vmalloc(queue_size);
+       if (!q_header)
+               return NULL;
+
+       queue = (void *)q_header + PAGE_SIZE;
+       queue->q_header = q_header;
+       queue->saved_header = NULL;
+       queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
+       queue->kernel_if->header_page = NULL;   /* Unused in guest. */
+       queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+       queue->kernel_if->host = false;
+       queue->kernel_if->va = NULL;
+       queue->kernel_if->mapped = false;
+
+       for (i = 0; i < num_data_pages; i++) {
+               queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
+               if (!queue->kernel_if->page[i])
+                       goto fail;
+       }
+
+       if (vmci_qp_pinned(flags)) {
+               queue->kernel_if->va =
+                   vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
+                        PAGE_KERNEL);
+               if (!queue->kernel_if->va)
+                       goto fail;
+
+               queue->kernel_if->mapped = true;
+       }
+
+       return (void *)queue;
+
+ fail:
+       qp_free_queue(queue, i * PAGE_SIZE);
+       return NULL;
+}
+
+/*
+ * Copies from a given buffer or iovector to a VMCI Queue.  Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_to_queue(struct vmci_queue *queue,
+                               u64 queue_offset,
+                               const void *src,
+                               size_t size,
+                               bool is_iovec)
+{
+       struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+       size_t bytes_copied = 0;
+
+       while (bytes_copied < size) {
+               u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+               size_t page_offset =
+                   (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+               void *va;
+               size_t to_copy;
+
+               if (!kernel_if->mapped)
+                       va = kmap(kernel_if->page[page_index]);
+               else
+                       va = (void *)((u8 *)kernel_if->va +
+                                     (page_index * PAGE_SIZE));
+
+               if (size - bytes_copied > PAGE_SIZE - page_offset)
+                       /* Enough payload to fill up from this page. */
+                       to_copy = PAGE_SIZE - page_offset;
+               else
+                       to_copy = size - bytes_copied;
+
+               if (is_iovec) {
+                       struct iovec *iov = (struct iovec *)src;
+                       int err;
+
+                       /* The iovec will track bytes_copied internally. */
+                       err = memcpy_fromiovec((u8 *)va + page_offset,
+                                              iov, to_copy);
+                       if (err != 0) {
+                               kunmap(kernel_if->page[page_index]);
+                               return VMCI_ERROR_INVALID_ARGS;
+                       }
+               } else {
+                       memcpy((u8 *)va + page_offset,
+                              (u8 *)src + bytes_copied, to_copy);
+               }
+
+               bytes_copied += to_copy;
+               if (!kernel_if->mapped)
+                       kunmap(kernel_if->page[page_index]);
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Copies to a given buffer or iovector from a VMCI Queue.  Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_from_queue(void *dest,
+                                 const struct vmci_queue *queue,
+                                 u64 queue_offset,
+                                 size_t size,
+                                 bool is_iovec)
+{
+       struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+       size_t bytes_copied = 0;
+
+       while (bytes_copied < size) {
+               u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+               size_t page_offset =
+                   (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+               void *va;
+               size_t to_copy;
+
+               if (!kernel_if->mapped)
+                       va = kmap(kernel_if->page[page_index]);
+               else
+                       va = (void *)((u8 *)kernel_if->va +
+                                     (page_index * PAGE_SIZE));
+
+               if (size - bytes_copied > PAGE_SIZE - page_offset)
+                       /* Enough payload to fill up this page. */
+                       to_copy = PAGE_SIZE - page_offset;
+               else
+                       to_copy = size - bytes_copied;
+
+               if (is_iovec) {
+                       struct iovec *iov = (struct iovec *)dest;
+                       int err;
+
+                       /* The iovec will track bytes_copied internally. */
+                       err = memcpy_toiovec(iov, (u8 *)va + page_offset,
+                                            to_copy);
+                       if (err != 0) {
+                               kunmap(kernel_if->page[page_index]);
+                               return VMCI_ERROR_INVALID_ARGS;
+                       }
+               } else {
+                       memcpy((u8 *)dest + bytes_copied,
+                              (u8 *)va + page_offset, to_copy);
+               }
+
+               bytes_copied += to_copy;
+               if (!kernel_if->mapped)
+                       kunmap(kernel_if->page[page_index]);
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Allocates two list of PPNs --- one for the pages in the produce queue,
+ * and the other for the pages in the consume queue. Intializes the list
+ * of PPNs with the page frame numbers of the KVA for the two queues (and
+ * the queue headers).
+ */
+static int qp_alloc_ppn_set(void *prod_q,
+                           u64 num_produce_pages,
+                           void *cons_q,
+                           u64 num_consume_pages, struct ppn_set *ppn_set)
+{
+       u32 *produce_ppns;
+       u32 *consume_ppns;
+       struct vmci_queue *produce_q = prod_q;
+       struct vmci_queue *consume_q = cons_q;
+       u64 i;
+
+       if (!produce_q || !num_produce_pages || !consume_q ||
+           !num_consume_pages || !ppn_set)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (ppn_set->initialized)
+               return VMCI_ERROR_ALREADY_EXISTS;
+
+       produce_ppns =
+           kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
+       if (!produce_ppns)
+               return VMCI_ERROR_NO_MEM;
+
+       consume_ppns =
+           kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
+       if (!consume_ppns) {
+               kfree(produce_ppns);
+               return VMCI_ERROR_NO_MEM;
+       }
+
+       produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
+       for (i = 1; i < num_produce_pages; i++) {
+               unsigned long pfn;
+
+               produce_ppns[i] =
+                   page_to_pfn(produce_q->kernel_if->page[i - 1]);
+               pfn = produce_ppns[i];
+
+               /* Fail allocation if PFN isn't supported by hypervisor. */
+               if (sizeof(pfn) > sizeof(*produce_ppns)
+                   && pfn != produce_ppns[i])
+                       goto ppn_error;
+       }
+
+       consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
+       for (i = 1; i < num_consume_pages; i++) {
+               unsigned long pfn;
+
+               consume_ppns[i] =
+                   page_to_pfn(consume_q->kernel_if->page[i - 1]);
+               pfn = consume_ppns[i];
+
+               /* Fail allocation if PFN isn't supported by hypervisor. */
+               if (sizeof(pfn) > sizeof(*consume_ppns)
+                   && pfn != consume_ppns[i])
+                       goto ppn_error;
+       }
+
+       ppn_set->num_produce_pages = num_produce_pages;
+       ppn_set->num_consume_pages = num_consume_pages;
+       ppn_set->produce_ppns = produce_ppns;
+       ppn_set->consume_ppns = consume_ppns;
+       ppn_set->initialized = true;
+       return VMCI_SUCCESS;
+
+ ppn_error:
+       kfree(produce_ppns);
+       kfree(consume_ppns);
+       return VMCI_ERROR_INVALID_ARGS;
+}
+
+/*
+ * Frees the two list of PPNs for a queue pair.
+ */
+static void qp_free_ppn_set(struct ppn_set *ppn_set)
+{
+       if (ppn_set->initialized) {
+               /* Do not call these functions on NULL inputs. */
+               kfree(ppn_set->produce_ppns);
+               kfree(ppn_set->consume_ppns);
+       }
+       memset(ppn_set, 0, sizeof(*ppn_set));
+}
+
+/*
+ * Populates the list of PPNs in the hypercall structure with the PPNS
+ * of the produce queue and the consume queue.
+ */
+static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
+{
+       memcpy(call_buf, ppn_set->produce_ppns,
+              ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
+       memcpy(call_buf +
+              ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
+              ppn_set->consume_ppns,
+              ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+
+       return VMCI_SUCCESS;
+}
+
+static int qp_memcpy_to_queue(struct vmci_queue *queue,
+                             u64 queue_offset,
+                             const void *src, size_t src_offset, size_t size)
+{
+       return __qp_memcpy_to_queue(queue, queue_offset,
+                                   (u8 *)src + src_offset, size, false);
+}
+
+static int qp_memcpy_from_queue(void *dest,
+                               size_t dest_offset,
+                               const struct vmci_queue *queue,
+                               u64 queue_offset, size_t size)
+{
+       return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
+                                     queue, queue_offset, size, false);
+}
+
+/*
+ * Copies from a given iovec from a VMCI Queue.
+ */
+static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
+                                 u64 queue_offset,
+                                 const void *src,
+                                 size_t src_offset, size_t size)
+{
+
+       /*
+        * We ignore src_offset because src is really a struct iovec * and will
+        * maintain offset internally.
+        */
+       return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+}
+
+/*
+ * Copies to a given iovec from a VMCI Queue.
+ */
+static int qp_memcpy_from_queue_iov(void *dest,
+                                   size_t dest_offset,
+                                   const struct vmci_queue *queue,
+                                   u64 queue_offset, size_t size)
+{
+       /*
+        * We ignore dest_offset because dest is really a struct iovec * and
+        * will maintain offset internally.
+        */
+       return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
+}
+
+/*
+ * Allocates kernel VA space of specified size plus space for the queue
+ * and kernel interface.  This is different from the guest queue allocator,
+ * because we do not allocate our own queue header/data pages here but
+ * share those of the guest.
+ */
+static struct vmci_queue *qp_host_alloc_queue(u64 size)
+{
+       struct vmci_queue *queue;
+       const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+       const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+       const size_t queue_page_size =
+           num_pages * sizeof(*queue->kernel_if->page);
+
+       queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+       if (queue) {
+               queue->q_header = NULL;
+               queue->saved_header = NULL;
+               queue->kernel_if =
+                   (struct vmci_queue_kern_if *)((u8 *)queue +
+                                                 sizeof(*queue));
+               queue->kernel_if->host = true;
+               queue->kernel_if->mutex = NULL;
+               queue->kernel_if->num_pages = num_pages;
+               queue->kernel_if->header_page =
+                   (struct page **)((u8 *)queue + queue_size);
+               queue->kernel_if->page = &queue->kernel_if->header_page[1];
+               queue->kernel_if->va = NULL;
+               queue->kernel_if->mapped = false;
+       }
+
+       return queue;
+}
+
+/*
+ * Frees kernel memory for a given queue (header plus translation
+ * structure).
+ */
+static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
+{
+       kfree(queue);
+}
+
+/*
+ * Initialize the mutex for the pair of queues.  This mutex is used to
+ * protect the q_header and the buffer from changing out from under any
+ * users of either queue.  Of course, it's only any good if the mutexes
+ * are actually acquired.  Queue structure must lie on non-paged memory
+ * or we cannot guarantee access to the mutex.
+ */
+static void qp_init_queue_mutex(struct vmci_queue *produce_q,
+                               struct vmci_queue *consume_q)
+{
+       /*
+        * Only the host queue has shared state - the guest queues do not
+        * need to synchronize access using a queue mutex.
+        */
+
+       if (produce_q->kernel_if->host) {
+               produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+               consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+               mutex_init(produce_q->kernel_if->mutex);
+       }
+}
+
+/*
+ * Cleans up the mutex for the pair of queues.
+ */
+static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
+                                  struct vmci_queue *consume_q)
+{
+       if (produce_q->kernel_if->host) {
+               produce_q->kernel_if->mutex = NULL;
+               consume_q->kernel_if->mutex = NULL;
+       }
+}
+
+/*
+ * Acquire the mutex for the queue.  Note that the produce_q and
+ * the consume_q share a mutex.  So, only one of the two need to
+ * be passed in to this routine.  Either will work just fine.
+ */
+static void qp_acquire_queue_mutex(struct vmci_queue *queue)
+{
+       if (queue->kernel_if->host)
+               mutex_lock(queue->kernel_if->mutex);
+}
+
+/*
+ * Release the mutex for the queue.  Note that the produce_q and
+ * the consume_q share a mutex.  So, only one of the two need to
+ * be passed in to this routine.  Either will work just fine.
+ */
+static void qp_release_queue_mutex(struct vmci_queue *queue)
+{
+       if (queue->kernel_if->host)
+               mutex_unlock(queue->kernel_if->mutex);
+}
+
+/*
+ * Helper function to release pages in the PageStoreAttachInfo
+ * previously obtained using get_user_pages.
+ */
+static void qp_release_pages(struct page **pages,
+                            u64 num_pages, bool dirty)
+{
+       int i;
+
+       for (i = 0; i < num_pages; i++) {
+               if (dirty)
+                       set_page_dirty(pages[i]);
+
+               page_cache_release(pages[i]);
+               pages[i] = NULL;
+       }
+}
+
+/*
+ * Lock the user pages referenced by the {produce,consume}Buffer
+ * struct into memory and populate the {produce,consume}Pages
+ * arrays in the attach structure with them.
+ */
+static int qp_host_get_user_memory(u64 produce_uva,
+                                  u64 consume_uva,
+                                  struct vmci_queue *produce_q,
+                                  struct vmci_queue *consume_q)
+{
+       int retval;
+       int err = VMCI_SUCCESS;
+
+       down_write(&current->mm->mmap_sem);
+       retval = get_user_pages(current,
+                               current->mm,
+                               (uintptr_t) produce_uva,
+                               produce_q->kernel_if->num_pages,
+                               1, 0, produce_q->kernel_if->header_page, NULL);
+       if (retval < produce_q->kernel_if->num_pages) {
+               pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
+               qp_release_pages(produce_q->kernel_if->header_page, retval,
+                                false);
+               err = VMCI_ERROR_NO_MEM;
+               goto out;
+       }
+
+       retval = get_user_pages(current,
+                               current->mm,
+                               (uintptr_t) consume_uva,
+                               consume_q->kernel_if->num_pages,
+                               1, 0, consume_q->kernel_if->header_page, NULL);
+       if (retval < consume_q->kernel_if->num_pages) {
+               pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
+               qp_release_pages(consume_q->kernel_if->header_page, retval,
+                                false);
+               qp_release_pages(produce_q->kernel_if->header_page,
+                                produce_q->kernel_if->num_pages, false);
+               err = VMCI_ERROR_NO_MEM;
+       }
+
+ out:
+       up_write(&current->mm->mmap_sem);
+
+       return err;
+}
+
+/*
+ * Registers the specification of the user pages used for backing a queue
+ * pair. Enough information to map in pages is stored in the OS specific
+ * part of the struct vmci_queue structure.
+ */
+static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
+                                       struct vmci_queue *produce_q,
+                                       struct vmci_queue *consume_q)
+{
+       u64 produce_uva;
+       u64 consume_uva;
+
+       /*
+        * The new style and the old style mapping only differs in
+        * that we either get a single or two UVAs, so we split the
+        * single UVA range at the appropriate spot.
+        */
+       produce_uva = page_store->pages;
+       consume_uva = page_store->pages +
+           produce_q->kernel_if->num_pages * PAGE_SIZE;
+       return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
+                                      consume_q);
+}
+
+/*
+ * Releases and removes the references to user pages stored in the attach
+ * struct.  Pages are released from the page cache and may become
+ * swappable again.
+ */
+static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
+                                          struct vmci_queue *consume_q)
+{
+       qp_release_pages(produce_q->kernel_if->header_page,
+                        produce_q->kernel_if->num_pages, true);
+       memset(produce_q->kernel_if->header_page, 0,
+              sizeof(*produce_q->kernel_if->header_page) *
+              produce_q->kernel_if->num_pages);
+       qp_release_pages(consume_q->kernel_if->header_page,
+                        consume_q->kernel_if->num_pages, true);
+       memset(consume_q->kernel_if->header_page, 0,
+              sizeof(*consume_q->kernel_if->header_page) *
+              consume_q->kernel_if->num_pages);
+}
+
+/*
+ * Once qp_host_register_user_memory has been performed on a
+ * queue, the queue pair headers can be mapped into the
+ * kernel. Once mapped, they must be unmapped with
+ * qp_host_unmap_queues prior to calling
+ * qp_host_unregister_user_memory.
+ * Pages are pinned.
+ */
+static int qp_host_map_queues(struct vmci_queue *produce_q,
+                             struct vmci_queue *consume_q)
+{
+       int result;
+
+       if (!produce_q->q_header || !consume_q->q_header) {
+               struct page *headers[2];
+
+               if (produce_q->q_header != consume_q->q_header)
+                       return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+               if (produce_q->kernel_if->header_page == NULL ||
+                   *produce_q->kernel_if->header_page == NULL)
+                       return VMCI_ERROR_UNAVAILABLE;
+
+               headers[0] = *produce_q->kernel_if->header_page;
+               headers[1] = *consume_q->kernel_if->header_page;
+
+               produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
+               if (produce_q->q_header != NULL) {
+                       consume_q->q_header =
+                           (struct vmci_queue_header *)((u8 *)
+                                                        produce_q->q_header +
+                                                        PAGE_SIZE);
+                       result = VMCI_SUCCESS;
+               } else {
+                       pr_warn("vmap failed\n");
+                       result = VMCI_ERROR_NO_MEM;
+               }
+       } else {
+               result = VMCI_SUCCESS;
+       }
+
+       return result;
+}
+
+/*
+ * Unmaps previously mapped queue pair headers from the kernel.
+ * Pages are unpinned.
+ */
+static int qp_host_unmap_queues(u32 gid,
+                               struct vmci_queue *produce_q,
+                               struct vmci_queue *consume_q)
+{
+       if (produce_q->q_header) {
+               if (produce_q->q_header < consume_q->q_header)
+                       vunmap(produce_q->q_header);
+               else
+                       vunmap(consume_q->q_header);
+
+               produce_q->q_header = NULL;
+               consume_q->q_header = NULL;
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle. Assumes
+ * that the list is locked.
+ */
+static struct qp_entry *qp_list_find(struct qp_list *qp_list,
+                                    struct vmci_handle handle)
+{
+       struct qp_entry *entry;
+
+       if (vmci_handle_is_invalid(handle))
+               return NULL;
+
+       list_for_each_entry(entry, &qp_list->head, list_item) {
+               if (vmci_handle_is_equal(entry->handle, handle))
+                       return entry;
+       }
+
+       return NULL;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_guest_endpoint *
+qp_guest_handle_to_entry(struct vmci_handle handle)
+{
+       struct qp_guest_endpoint *entry;
+       struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
+
+       entry = qp ? container_of(
+               qp, struct qp_guest_endpoint, qp) : NULL;
+       return entry;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_broker_entry *
+qp_broker_handle_to_entry(struct vmci_handle handle)
+{
+       struct qp_broker_entry *entry;
+       struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
+
+       entry = qp ? container_of(
+               qp, struct qp_broker_entry, qp) : NULL;
+       return entry;
+}
+
+/*
+ * Dispatches a queue pair event message directly into the local event
+ * queue.
+ */
+static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
+{
+       u32 context_id = vmci_get_context_id();
+       struct vmci_event_qp ev;
+
+       ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+       ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                         VMCI_CONTEXT_RESOURCE_ID);
+       ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+       ev.msg.event_data.event =
+           attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+       ev.payload.peer_id = context_id;
+       ev.payload.handle = handle;
+
+       return vmci_event_dispatch(&ev.msg.hdr);
+}
+
+/*
+ * Allocates and initializes a qp_guest_endpoint structure.
+ * Allocates a queue_pair rid (and handle) iff the given entry has
+ * an invalid handle.  0 through VMCI_RESERVED_RESOURCE_ID_MAX
+ * are reserved handles.  Assumes that the QP list mutex is held
+ * by the caller.
+ */
+static struct qp_guest_endpoint *
+qp_guest_endpoint_create(struct vmci_handle handle,
+                        u32 peer,
+                        u32 flags,
+                        u64 produce_size,
+                        u64 consume_size,
+                        void *produce_q,
+                        void *consume_q)
+{
+       int result;
+       struct qp_guest_endpoint *entry;
+       /* One page each for the queue headers. */
+       const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
+           DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
+
+       if (vmci_handle_is_invalid(handle)) {
+               u32 context_id = vmci_get_context_id();
+
+               handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+       }
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (entry) {
+               entry->qp.peer = peer;
+               entry->qp.flags = flags;
+               entry->qp.produce_size = produce_size;
+               entry->qp.consume_size = consume_size;
+               entry->qp.ref_count = 0;
+               entry->num_ppns = num_ppns;
+               entry->produce_q = produce_q;
+               entry->consume_q = consume_q;
+               INIT_LIST_HEAD(&entry->qp.list_item);
+
+               /* Add resource obj */
+               result = vmci_resource_add(&entry->resource,
+                                          VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+                                          handle);
+               entry->qp.handle = vmci_resource_handle(&entry->resource);
+               if ((result != VMCI_SUCCESS) ||
+                   qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
+                       pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+                               handle.context, handle.resource, result);
+                       kfree(entry);
+                       entry = NULL;
+               }
+       }
+       return entry;
+}
+
+/*
+ * Frees a qp_guest_endpoint structure.
+ */
+static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
+{
+       qp_free_ppn_set(&entry->ppn_set);
+       qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+       qp_free_queue(entry->produce_q, entry->qp.produce_size);
+       qp_free_queue(entry->consume_q, entry->qp.consume_size);
+       /* Unlink from resource hash table and free callback */
+       vmci_resource_remove(&entry->resource);
+
+       kfree(entry);
+}
+
+/*
+ * Helper to make a queue_pairAlloc hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
+{
+       struct vmci_qp_alloc_msg *alloc_msg;
+       size_t msg_size;
+       int result;
+
+       if (!entry || entry->num_ppns <= 2)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       msg_size = sizeof(*alloc_msg) +
+           (size_t) entry->num_ppns * sizeof(u32);
+       alloc_msg = kmalloc(msg_size, GFP_KERNEL);
+       if (!alloc_msg)
+               return VMCI_ERROR_NO_MEM;
+
+       alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                             VMCI_QUEUEPAIR_ALLOC);
+       alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
+       alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
+       alloc_msg->handle = entry->qp.handle;
+       alloc_msg->peer = entry->qp.peer;
+       alloc_msg->flags = entry->qp.flags;
+       alloc_msg->produce_size = entry->qp.produce_size;
+       alloc_msg->consume_size = entry->qp.consume_size;
+       alloc_msg->num_ppns = entry->num_ppns;
+
+       result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
+                                    &entry->ppn_set);
+       if (result == VMCI_SUCCESS)
+               result = vmci_send_datagram(&alloc_msg->hdr);
+
+       kfree(alloc_msg);
+
+       return result;
+}
+
+/*
+ * Helper to make a queue_pairDetach hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_detatch_hypercall(struct vmci_handle handle)
+{
+       struct vmci_qp_detach_msg detach_msg;
+
+       detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                             VMCI_QUEUEPAIR_DETACH);
+       detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+       detach_msg.hdr.payload_size = sizeof(handle);
+       detach_msg.handle = handle;
+
+       return vmci_send_datagram(&detach_msg.hdr);
+}
+
+/*
+ * Adds the given entry to the list. Assumes that the list is locked.
+ */
+static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
+{
+       if (entry)
+               list_add(&entry->list_item, &qp_list->head);
+}
+
+/*
+ * Removes the given entry from the list. Assumes that the list is locked.
+ */
+static void qp_list_remove_entry(struct qp_list *qp_list,
+                                struct qp_entry *entry)
+{
+       if (entry)
+               list_del(&entry->list_item);
+}
+
+/*
+ * Helper for VMCI queue_pair detach interface. Frees the physical
+ * pages for the queue pair.
+ */
+static int qp_detatch_guest_work(struct vmci_handle handle)
+{
+       int result;
+       struct qp_guest_endpoint *entry;
+       u32 ref_count = ~0;     /* To avoid compiler warning below */
+
+       mutex_lock(&qp_guest_endpoints.mutex);
+
+       entry = qp_guest_handle_to_entry(handle);
+       if (!entry) {
+               mutex_unlock(&qp_guest_endpoints.mutex);
+               return VMCI_ERROR_NOT_FOUND;
+       }
+
+       if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+               result = VMCI_SUCCESS;
+
+               if (entry->qp.ref_count > 1) {
+                       result = qp_notify_peer_local(false, handle);
+                       /*
+                        * We can fail to notify a local queuepair
+                        * because we can't allocate.  We still want
+                        * to release the entry if that happens, so
+                        * don't bail out yet.
+                        */
+               }
+       } else {
+               result = qp_detatch_hypercall(handle);
+               if (result < VMCI_SUCCESS) {
+                       /*
+                        * We failed to notify a non-local queuepair.
+                        * That other queuepair might still be
+                        * accessing the shared memory, so don't
+                        * release the entry yet.  It will get cleaned
+                        * up by VMCIqueue_pair_Exit() if necessary
+                        * (assuming we are going away, otherwise why
+                        * did this fail?).
+                        */
+
+                       mutex_unlock(&qp_guest_endpoints.mutex);
+                       return result;
+               }
+       }
+
+       /*
+        * If we get here then we either failed to notify a local queuepair, or
+        * we succeeded in all cases.  Release the entry if required.
+        */
+
+       entry->qp.ref_count--;
+       if (entry->qp.ref_count == 0)
+               qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
+
+       /* If we didn't remove the entry, this could change once we unlock. */
+       if (entry)
+               ref_count = entry->qp.ref_count;
+
+       mutex_unlock(&qp_guest_endpoints.mutex);
+
+       if (ref_count == 0)
+               qp_guest_endpoint_destroy(entry);
+
+       return result;
+}
+
+/*
+ * This functions handles the actual allocation of a VMCI queue
+ * pair guest endpoint. Allocates physical pages for the queue
+ * pair. It makes OS dependent calls through generic wrappers.
+ */
+static int qp_alloc_guest_work(struct vmci_handle *handle,
+                              struct vmci_queue **produce_q,
+                              u64 produce_size,
+                              struct vmci_queue **consume_q,
+                              u64 consume_size,
+                              u32 peer,
+                              u32 flags,
+                              u32 priv_flags)
+{
+       const u64 num_produce_pages =
+           DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
+       const u64 num_consume_pages =
+           DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
+       void *my_produce_q = NULL;
+       void *my_consume_q = NULL;
+       int result;
+       struct qp_guest_endpoint *queue_pair_entry = NULL;
+
+       if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
+               return VMCI_ERROR_NO_ACCESS;
+
+       mutex_lock(&qp_guest_endpoints.mutex);
+
+       queue_pair_entry = qp_guest_handle_to_entry(*handle);
+       if (queue_pair_entry) {
+               if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+                       /* Local attach case. */
+                       if (queue_pair_entry->qp.ref_count > 1) {
+                               pr_devel("Error attempting to attach more than once\n");
+                               result = VMCI_ERROR_UNAVAILABLE;
+                               goto error_keep_entry;
+                       }
+
+                       if (queue_pair_entry->qp.produce_size != consume_size ||
+                           queue_pair_entry->qp.consume_size !=
+                           produce_size ||
+                           queue_pair_entry->qp.flags !=
+                           (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
+                               pr_devel("Error mismatched queue pair in local attach\n");
+                               result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
+                               goto error_keep_entry;
+                       }
+
+                       /*
+                        * Do a local attach.  We swap the consume and
+                        * produce queues for the attacher and deliver
+                        * an attach event.
+                        */
+                       result = qp_notify_peer_local(true, *handle);
+                       if (result < VMCI_SUCCESS)
+                               goto error_keep_entry;
+
+                       my_produce_q = queue_pair_entry->consume_q;
+                       my_consume_q = queue_pair_entry->produce_q;
+                       goto out;
+               }
+
+               result = VMCI_ERROR_ALREADY_EXISTS;
+               goto error_keep_entry;
+       }
+
+       my_produce_q = qp_alloc_queue(produce_size, flags);
+       if (!my_produce_q) {
+               pr_warn("Error allocating pages for produce queue\n");
+               result = VMCI_ERROR_NO_MEM;
+               goto error;
+       }
+
+       my_consume_q = qp_alloc_queue(consume_size, flags);
+       if (!my_consume_q) {
+               pr_warn("Error allocating pages for consume queue\n");
+               result = VMCI_ERROR_NO_MEM;
+               goto error;
+       }
+
+       queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
+                                                   produce_size, consume_size,
+                                                   my_produce_q, my_consume_q);
+       if (!queue_pair_entry) {
+               pr_warn("Error allocating memory in %s\n", __func__);
+               result = VMCI_ERROR_NO_MEM;
+               goto error;
+       }
+
+       result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
+                                 num_consume_pages,
+                                 &queue_pair_entry->ppn_set);
+       if (result < VMCI_SUCCESS) {
+               pr_warn("qp_alloc_ppn_set failed\n");
+               goto error;
+       }
+
+       /*
+        * It's only necessary to notify the host if this queue pair will be
+        * attached to from another context.
+        */
+       if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+               /* Local create case. */
+               u32 context_id = vmci_get_context_id();
+
+               /*
+                * Enforce similar checks on local queue pairs as we
+                * do for regular ones.  The handle's context must
+                * match the creator or attacher context id (here they
+                * are both the current context id) and the
+                * attach-only flag cannot exist during create.  We
+                * also ensure specified peer is this context or an
+                * invalid one.
+                */
+               if (queue_pair_entry->qp.handle.context != context_id ||
+                   (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
+                    queue_pair_entry->qp.peer != context_id)) {
+                       result = VMCI_ERROR_NO_ACCESS;
+                       goto error;
+               }
+
+               if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
+                       result = VMCI_ERROR_NOT_FOUND;
+                       goto error;
+               }
+       } else {
+               result = qp_alloc_hypercall(queue_pair_entry);
+               if (result < VMCI_SUCCESS) {
+                       pr_warn("qp_alloc_hypercall result = %d\n", result);
+                       goto error;
+               }
+       }
+
+       qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
+                           (struct vmci_queue *)my_consume_q);
+
+       qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
+
+ out:
+       queue_pair_entry->qp.ref_count++;
+       *handle = queue_pair_entry->qp.handle;
+       *produce_q = (struct vmci_queue *)my_produce_q;
+       *consume_q = (struct vmci_queue *)my_consume_q;
+
+       /*
+        * We should initialize the queue pair header pages on a local
+        * queue pair create.  For non-local queue pairs, the
+        * hypervisor initializes the header pages in the create step.
+        */
+       if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
+           queue_pair_entry->qp.ref_count == 1) {
+               vmci_q_header_init((*produce_q)->q_header, *handle);
+               vmci_q_header_init((*consume_q)->q_header, *handle);
+       }
+
+       mutex_unlock(&qp_guest_endpoints.mutex);
+
+       return VMCI_SUCCESS;
+
+ error:
+       mutex_unlock(&qp_guest_endpoints.mutex);
+       if (queue_pair_entry) {
+               /* The queues will be freed inside the destroy routine. */
+               qp_guest_endpoint_destroy(queue_pair_entry);
+       } else {
+               qp_free_queue(my_produce_q, produce_size);
+               qp_free_queue(my_consume_q, consume_size);
+       }
+       return result;
+
+ error_keep_entry:
+       /* This path should only be used when an existing entry was found. */
+       mutex_unlock(&qp_guest_endpoints.mutex);
+       return result;
+}
+
+/*
+ * The first endpoint issuing a queue pair allocation will create the state
+ * of the queue pair in the queue pair broker.
+ *
+ * If the creator is a guest, it will associate a VMX virtual address range
+ * with the queue pair as specified by the page_store. For compatibility with
+ * older VMX'en, that would use a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later using
+ * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
+ * used.
+ *
+ * If the creator is the host, a page_store of NULL should be used as well,
+ * since the host is not able to supply a page store for the queue pair.
+ *
+ * For older VMX and host callers, the queue pair will be created in the
+ * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
+ * created in VMCOQPB_CREATED_MEM state.
+ */
+static int qp_broker_create(struct vmci_handle handle,
+                           u32 peer,
+                           u32 flags,
+                           u32 priv_flags,
+                           u64 produce_size,
+                           u64 consume_size,
+                           struct vmci_qp_page_store *page_store,
+                           struct vmci_ctx *context,
+                           vmci_event_release_cb wakeup_cb,
+                           void *client_data, struct qp_broker_entry **ent)
+{
+       struct qp_broker_entry *entry = NULL;
+       const u32 context_id = vmci_ctx_get_id(context);
+       bool is_local = flags & VMCI_QPFLAG_LOCAL;
+       int result;
+       u64 guest_produce_size;
+       u64 guest_consume_size;
+
+       /* Do not create if the caller asked not to. */
+       if (flags & VMCI_QPFLAG_ATTACH_ONLY)
+               return VMCI_ERROR_NOT_FOUND;
+
+       /*
+        * Creator's context ID should match handle's context ID or the creator
+        * must allow the context in handle's context ID as the "peer".
+        */
+       if (handle.context != context_id && handle.context != peer)
+               return VMCI_ERROR_NO_ACCESS;
+
+       if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
+               return VMCI_ERROR_DST_UNREACHABLE;
+
+       /*
+        * Creator's context ID for local queue pairs should match the
+        * peer, if a peer is specified.
+        */
+       if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
+               return VMCI_ERROR_NO_ACCESS;
+
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+       if (!entry)
+               return VMCI_ERROR_NO_MEM;
+
+       if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
+               /*
+                * The queue pair broker entry stores values from the guest
+                * point of view, so a creating host side endpoint should swap
+                * produce and consume values -- unless it is a local queue
+                * pair, in which case no swapping is necessary, since the local
+                * attacher will swap queues.
+                */
+
+               guest_produce_size = consume_size;
+               guest_consume_size = produce_size;
+       } else {
+               guest_produce_size = produce_size;
+               guest_consume_size = consume_size;
+       }
+
+       entry->qp.handle = handle;
+       entry->qp.peer = peer;
+       entry->qp.flags = flags;
+       entry->qp.produce_size = guest_produce_size;
+       entry->qp.consume_size = guest_consume_size;
+       entry->qp.ref_count = 1;
+       entry->create_id = context_id;
+       entry->attach_id = VMCI_INVALID_ID;
+       entry->state = VMCIQPB_NEW;
+       entry->require_trusted_attach =
+           !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
+       entry->created_by_trusted =
+           !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
+       entry->vmci_page_files = false;
+       entry->wakeup_cb = wakeup_cb;
+       entry->client_data = client_data;
+       entry->produce_q = qp_host_alloc_queue(guest_produce_size);
+       if (entry->produce_q == NULL) {
+               result = VMCI_ERROR_NO_MEM;
+               goto error;
+       }
+       entry->consume_q = qp_host_alloc_queue(guest_consume_size);
+       if (entry->consume_q == NULL) {
+               result = VMCI_ERROR_NO_MEM;
+               goto error;
+       }
+
+       qp_init_queue_mutex(entry->produce_q, entry->consume_q);
+
+       INIT_LIST_HEAD(&entry->qp.list_item);
+
+       if (is_local) {
+               u8 *tmp;
+
+               entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
+                                          PAGE_SIZE, GFP_KERNEL);
+               if (entry->local_mem == NULL) {
+                       result = VMCI_ERROR_NO_MEM;
+                       goto error;
+               }
+               entry->state = VMCIQPB_CREATED_MEM;
+               entry->produce_q->q_header = entry->local_mem;
+               tmp = (u8 *)entry->local_mem + PAGE_SIZE *
+                   (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
+               entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
+       } else if (page_store) {
+               /*
+                * The VMX already initialized the queue pair headers, so no
+                * need for the kernel side to do that.
+                */
+               result = qp_host_register_user_memory(page_store,
+                                                     entry->produce_q,
+                                                     entry->consume_q);
+               if (result < VMCI_SUCCESS)
+                       goto error;
+
+               entry->state = VMCIQPB_CREATED_MEM;
+       } else {
+               /*
+                * A create without a page_store may be either a host
+                * side create (in which case we are waiting for the
+                * guest side to supply the memory) or an old style
+                * queue pair create (in which case we will expect a
+                * set page store call as the next step).
+                */
+               entry->state = VMCIQPB_CREATED_NO_MEM;
+       }
+
+       qp_list_add_entry(&qp_broker_list, &entry->qp);
+       if (ent != NULL)
+               *ent = entry;
+
+       /* Add to resource obj */
+       result = vmci_resource_add(&entry->resource,
+                                  VMCI_RESOURCE_TYPE_QPAIR_HOST,
+                                  handle);
+       if (result != VMCI_SUCCESS) {
+               pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+                       handle.context, handle.resource, result);
+               goto error;
+       }
+
+       entry->qp.handle = vmci_resource_handle(&entry->resource);
+       if (is_local) {
+               vmci_q_header_init(entry->produce_q->q_header,
+                                  entry->qp.handle);
+               vmci_q_header_init(entry->consume_q->q_header,
+                                  entry->qp.handle);
+       }
+
+       vmci_ctx_qp_create(context, entry->qp.handle);
+
+       return VMCI_SUCCESS;
+
+ error:
+       if (entry != NULL) {
+               qp_host_free_queue(entry->produce_q, guest_produce_size);
+               qp_host_free_queue(entry->consume_q, guest_consume_size);
+               kfree(entry);
+       }
+
+       return result;
+}
+
+/*
+ * Enqueues an event datagram to notify the peer VM attached to
+ * the given queue pair handle about attach/detach event by the
+ * given VM.  Returns Payload size of datagram enqueued on
+ * success, error code otherwise.
+ */
+static int qp_notify_peer(bool attach,
+                         struct vmci_handle handle,
+                         u32 my_id,
+                         u32 peer_id)
+{
+       int rv;
+       struct vmci_event_qp ev;
+
+       if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
+           peer_id == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       /*
+        * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
+        * number of pending events from the hypervisor to a given VM
+        * otherwise a rogue VM could do an arbitrary number of attach
+        * and detach operations causing memory pressure in the host
+        * kernel.
+        */
+
+       ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
+       ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+                                         VMCI_CONTEXT_RESOURCE_ID);
+       ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+       ev.msg.event_data.event = attach ?
+           VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+       ev.payload.handle = handle;
+       ev.payload.peer_id = my_id;
+
+       rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+                                   &ev.msg.hdr, false);
+       if (rv < VMCI_SUCCESS)
+               pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
+                       attach ? "ATTACH" : "DETACH", peer_id);
+
+       return rv;
+}
+
+/*
+ * The second endpoint issuing a queue pair allocation will attach to
+ * the queue pair registered with the queue pair broker.
+ *
+ * If the attacher is a guest, it will associate a VMX virtual address
+ * range with the queue pair as specified by the page_store. At this
+ * point, the already attach host endpoint may start using the queue
+ * pair, and an attach event is sent to it. For compatibility with
+ * older VMX'en, that used a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later
+ * using vmci_qp_broker_set_page_store. In that case, a page_store of
+ * NULL should be used, and the attach event will be generated once
+ * the actual page store has been set.
+ *
+ * If the attacher is the host, a page_store of NULL should be used as
+ * well, since the page store information is already set by the guest.
+ *
+ * For new VMX and host callers, the queue pair will be moved to the
+ * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
+ * moved to the VMCOQPB_ATTACHED_NO_MEM state.
+ */
+static int qp_broker_attach(struct qp_broker_entry *entry,
+                           u32 peer,
+                           u32 flags,
+                           u32 priv_flags,
+                           u64 produce_size,
+                           u64 consume_size,
+                           struct vmci_qp_page_store *page_store,
+                           struct vmci_ctx *context,
+                           vmci_event_release_cb wakeup_cb,
+                           void *client_data,
+                           struct qp_broker_entry **ent)
+{
+       const u32 context_id = vmci_ctx_get_id(context);
+       bool is_local = flags & VMCI_QPFLAG_LOCAL;
+       int result;
+
+       if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+           entry->state != VMCIQPB_CREATED_MEM)
+               return VMCI_ERROR_UNAVAILABLE;
+
+       if (is_local) {
+               if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
+                   context_id != entry->create_id) {
+                       return VMCI_ERROR_INVALID_ARGS;
+               }
+       } else if (context_id == entry->create_id ||
+                  context_id == entry->attach_id) {
+               return VMCI_ERROR_ALREADY_EXISTS;
+       }
+
+       if (VMCI_CONTEXT_IS_VM(context_id) &&
+           VMCI_CONTEXT_IS_VM(entry->create_id))
+               return VMCI_ERROR_DST_UNREACHABLE;
+
+       /*
+        * If we are attaching from a restricted context then the queuepair
+        * must have been created by a trusted endpoint.
+        */
+       if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+           !entry->created_by_trusted)
+               return VMCI_ERROR_NO_ACCESS;
+
+       /*
+        * If we are attaching to a queuepair that was created by a restricted
+        * context then we must be trusted.
+        */
+       if (entry->require_trusted_attach &&
+           (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
+               return VMCI_ERROR_NO_ACCESS;
+
+       /*
+        * If the creator specifies VMCI_INVALID_ID in "peer" field, access
+        * control check is not performed.
+        */
+       if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
+               return VMCI_ERROR_NO_ACCESS;
+
+       if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
+               /*
+                * Do not attach if the caller doesn't support Host Queue Pairs
+                * and a host created this queue pair.
+                */
+
+               if (!vmci_ctx_supports_host_qp(context))
+                       return VMCI_ERROR_INVALID_RESOURCE;
+
+       } else if (context_id == VMCI_HOST_CONTEXT_ID) {
+               struct vmci_ctx *create_context;
+               bool supports_host_qp;
+
+               /*
+                * Do not attach a host to a user created queue pair if that
+                * user doesn't support host queue pair end points.
+                */
+
+               create_context = vmci_ctx_get(entry->create_id);
+               supports_host_qp = vmci_ctx_supports_host_qp(create_context);
+               vmci_ctx_put(create_context);
+
+               if (!supports_host_qp)
+                       return VMCI_ERROR_INVALID_RESOURCE;
+       }
+
+       if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
+               return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+       if (context_id != VMCI_HOST_CONTEXT_ID) {
+               /*
+                * The queue pair broker entry stores values from the guest
+                * point of view, so an attaching guest should match the values
+                * stored in the entry.
+                */
+
+               if (entry->qp.produce_size != produce_size ||
+                   entry->qp.consume_size != consume_size) {
+                       return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+               }
+       } else if (entry->qp.produce_size != consume_size ||
+                  entry->qp.consume_size != produce_size) {
+               return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+       }
+
+       if (context_id != VMCI_HOST_CONTEXT_ID) {
+               /*
+                * If a guest attached to a queue pair, it will supply
+                * the backing memory.  If this is a pre NOVMVM vmx,
+                * the backing memory will be supplied by calling
+                * vmci_qp_broker_set_page_store() following the
+                * return of the vmci_qp_broker_alloc() call. If it is
+                * a vmx of version NOVMVM or later, the page store
+                * must be supplied as part of the
+                * vmci_qp_broker_alloc call.  Under all circumstances
+                * must the initially created queue pair not have any
+                * memory associated with it already.
+                */
+
+               if (entry->state != VMCIQPB_CREATED_NO_MEM)
+                       return VMCI_ERROR_INVALID_ARGS;
+
+               if (page_store != NULL) {
+                       /*
+                        * Patch up host state to point to guest
+                        * supplied memory. The VMX already
+                        * initialized the queue pair headers, so no
+                        * need for the kernel side to do that.
+                        */
+
+                       result = qp_host_register_user_memory(page_store,
+                                                             entry->produce_q,
+                                                             entry->consume_q);
+                       if (result < VMCI_SUCCESS)
+                               return result;
+
+                       /*
+                        * Preemptively load in the headers if non-blocking to
+                        * prevent blocking later.
+                        */
+                       if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
+                               result = qp_host_map_queues(entry->produce_q,
+                                                           entry->consume_q);
+                               if (result < VMCI_SUCCESS) {
+                                       qp_host_unregister_user_memory(
+                                               entry->produce_q,
+                                               entry->consume_q);
+                                       return result;
+                               }
+                       }
+
+                       entry->state = VMCIQPB_ATTACHED_MEM;
+               } else {
+                       entry->state = VMCIQPB_ATTACHED_NO_MEM;
+               }
+       } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
+               /*
+                * The host side is attempting to attach to a queue
+                * pair that doesn't have any memory associated with
+                * it. This must be a pre NOVMVM vmx that hasn't set
+                * the page store information yet, or a quiesced VM.
+                */
+
+               return VMCI_ERROR_UNAVAILABLE;
+       } else {
+               /*
+                * For non-blocking queue pairs, we cannot rely on
+                * enqueue/dequeue to map in the pages on the
+                * host-side, since it may block, so we make an
+                * attempt here.
+                */
+
+               if (flags & VMCI_QPFLAG_NONBLOCK) {
+                       result =
+                           qp_host_map_queues(entry->produce_q,
+                                              entry->consume_q);
+                       if (result < VMCI_SUCCESS)
+                               return result;
+
+                       entry->qp.flags |= flags &
+                           (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
+               }
+
+               /* The host side has successfully attached to a queue pair. */
+               entry->state = VMCIQPB_ATTACHED_MEM;
+       }
+
+       if (entry->state == VMCIQPB_ATTACHED_MEM) {
+               result =
+                   qp_notify_peer(true, entry->qp.handle, context_id,
+                                  entry->create_id);
+               if (result < VMCI_SUCCESS)
+                       pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+                               entry->create_id, entry->qp.handle.context,
+                               entry->qp.handle.resource);
+       }
+
+       entry->attach_id = context_id;
+       entry->qp.ref_count++;
+       if (wakeup_cb) {
+               entry->wakeup_cb = wakeup_cb;
+               entry->client_data = client_data;
+       }
+
+       /*
+        * When attaching to local queue pairs, the context already has
+        * an entry tracking the queue pair, so don't add another one.
+        */
+       if (!is_local)
+               vmci_ctx_qp_create(context, entry->qp.handle);
+
+       if (ent != NULL)
+               *ent = entry;
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * queue_pair_Alloc for use when setting up queue pair endpoints
+ * on the host.
+ */
+static int qp_broker_alloc(struct vmci_handle handle,
+                          u32 peer,
+                          u32 flags,
+                          u32 priv_flags,
+                          u64 produce_size,
+                          u64 consume_size,
+                          struct vmci_qp_page_store *page_store,
+                          struct vmci_ctx *context,
+                          vmci_event_release_cb wakeup_cb,
+                          void *client_data,
+                          struct qp_broker_entry **ent,
+                          bool *swap)
+{
+       const u32 context_id = vmci_ctx_get_id(context);
+       bool create;
+       struct qp_broker_entry *entry = NULL;
+       bool is_local = flags & VMCI_QPFLAG_LOCAL;
+       int result;
+
+       if (vmci_handle_is_invalid(handle) ||
+           (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
+           !(produce_size || consume_size) ||
+           !context || context_id == VMCI_INVALID_ID ||
+           handle.context == VMCI_INVALID_ID) {
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       /*
+        * In the initial argument check, we ensure that non-vmkernel hosts
+        * are not allowed to create local queue pairs.
+        */
+
+       mutex_lock(&qp_broker_list.mutex);
+
+       if (!is_local && vmci_ctx_qp_exists(context, handle)) {
+               pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
+                        context_id, handle.context, handle.resource);
+               mutex_unlock(&qp_broker_list.mutex);
+               return VMCI_ERROR_ALREADY_EXISTS;
+       }
+
+       if (handle.resource != VMCI_INVALID_ID)
+               entry = qp_broker_handle_to_entry(handle);
+
+       if (!entry) {
+               create = true;
+               result =
+                   qp_broker_create(handle, peer, flags, priv_flags,
+                                    produce_size, consume_size, page_store,
+                                    context, wakeup_cb, client_data, ent);
+       } else {
+               create = false;
+               result =
+                   qp_broker_attach(entry, peer, flags, priv_flags,
+                                    produce_size, consume_size, page_store,
+                                    context, wakeup_cb, client_data, ent);
+       }
+
+       mutex_unlock(&qp_broker_list.mutex);
+
+       if (swap)
+               *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
+                   !(create && is_local);
+
+       return result;
+}
+
+/*
+ * This function implements the kernel API for allocating a queue
+ * pair.
+ */
+static int qp_alloc_host_work(struct vmci_handle *handle,
+                             struct vmci_queue **produce_q,
+                             u64 produce_size,
+                             struct vmci_queue **consume_q,
+                             u64 consume_size,
+                             u32 peer,
+                             u32 flags,
+                             u32 priv_flags,
+                             vmci_event_release_cb wakeup_cb,
+                             void *client_data)
+{
+       struct vmci_handle new_handle;
+       struct vmci_ctx *context;
+       struct qp_broker_entry *entry;
+       int result;
+       bool swap;
+
+       if (vmci_handle_is_invalid(*handle)) {
+               new_handle = vmci_make_handle(
+                       VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
+       } else
+               new_handle = *handle;
+
+       context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+       entry = NULL;
+       result =
+           qp_broker_alloc(new_handle, peer, flags, priv_flags,
+                           produce_size, consume_size, NULL, context,
+                           wakeup_cb, client_data, &entry, &swap);
+       if (result == VMCI_SUCCESS) {
+               if (swap) {
+                       /*
+                        * If this is a local queue pair, the attacher
+                        * will swap around produce and consume
+                        * queues.
+                        */
+
+                       *produce_q = entry->consume_q;
+                       *consume_q = entry->produce_q;
+               } else {
+                       *produce_q = entry->produce_q;
+                       *consume_q = entry->consume_q;
+               }
+
+               *handle = vmci_resource_handle(&entry->resource);
+       } else {
+               *handle = VMCI_INVALID_HANDLE;
+               pr_devel("queue pair broker failed to alloc (result=%d)\n",
+                        result);
+       }
+       vmci_ctx_put(context);
+       return result;
+}
+
+/*
+ * Allocates a VMCI queue_pair. Only checks validity of input
+ * arguments. The real work is done in the host or guest
+ * specific function.
+ */
+int vmci_qp_alloc(struct vmci_handle *handle,
+                 struct vmci_queue **produce_q,
+                 u64 produce_size,
+                 struct vmci_queue **consume_q,
+                 u64 consume_size,
+                 u32 peer,
+                 u32 flags,
+                 u32 priv_flags,
+                 bool guest_endpoint,
+                 vmci_event_release_cb wakeup_cb,
+                 void *client_data)
+{
+       if (!handle || !produce_q || !consume_q ||
+           (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (guest_endpoint) {
+               return qp_alloc_guest_work(handle, produce_q,
+                                          produce_size, consume_q,
+                                          consume_size, peer,
+                                          flags, priv_flags);
+       } else {
+               return qp_alloc_host_work(handle, produce_q,
+                                         produce_size, consume_q,
+                                         consume_size, peer, flags,
+                                         priv_flags, wakeup_cb, client_data);
+       }
+}
+
+/*
+ * This function implements the host kernel API for detaching from
+ * a queue pair.
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+       int result;
+       struct vmci_ctx *context;
+
+       context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+       result = vmci_qp_broker_detach(handle, context);
+
+       vmci_ctx_put(context);
+       return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+       if (vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       if (guest_endpoint)
+               return qp_detatch_guest_work(handle);
+       else
+               return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+       if (!list_empty(&qp_list->head)) {
+               struct qp_entry *entry =
+                   list_first_entry(&qp_list->head, struct qp_entry,
+                                    list_item);
+               return entry;
+       }
+
+       return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+       struct qp_entry *entry;
+       struct qp_broker_entry *be;
+
+       mutex_lock(&qp_broker_list.mutex);
+
+       while ((entry = qp_list_get_head(&qp_broker_list))) {
+               be = (struct qp_broker_entry *)entry;
+
+               qp_list_remove_entry(&qp_broker_list, entry);
+               kfree(be);
+       }
+
+       mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair.  Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+                        u32 peer,
+                        u32 flags,
+                        u32 priv_flags,
+                        u64 produce_size,
+                        u64 consume_size,
+                        struct vmci_qp_page_store *page_store,
+                        struct vmci_ctx *context)
+{
+       return qp_broker_alloc(handle, peer, flags, priv_flags,
+                              produce_size, consume_size,
+                              page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * to either from VMCIQBP_CREATED_NO_MEM to VMCIQBP_CREATED_MEM or
+ * VMCIQBP_ATTACHED_NO_MEM to VMCIQBP_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attached event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+                                 u64 produce_uva,
+                                 u64 consume_uva,
+                                 struct vmci_ctx *context)
+{
+       struct qp_broker_entry *entry;
+       int result;
+       const u32 context_id = vmci_ctx_get_id(context);
+
+       if (vmci_handle_is_invalid(handle) || !context ||
+           context_id == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       /*
+        * We only support guest to host queue pairs, so the VMX must
+        * supply UVAs for the mapped page files.
+        */
+
+       if (produce_uva == 0 || consume_uva == 0)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       mutex_lock(&qp_broker_list.mutex);
+
+       if (!vmci_ctx_qp_exists(context, handle)) {
+               pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+                       context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       entry = qp_broker_handle_to_entry(handle);
+       if (!entry) {
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       /*
+        * If I'm the owner then I can set the page store.
+        *
+        * Or, if a host created the queue_pair and I'm the attached peer
+        * then I can set the page store.
+        */
+       if (entry->create_id != context_id &&
+           (entry->create_id != VMCI_HOST_CONTEXT_ID ||
+            entry->attach_id != context_id)) {
+               result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
+               goto out;
+       }
+
+       if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+           entry->state != VMCIQPB_ATTACHED_NO_MEM) {
+               result = VMCI_ERROR_UNAVAILABLE;
+               goto out;
+       }
+
+       result = qp_host_get_user_memory(produce_uva, consume_uva,
+                                        entry->produce_q, entry->consume_q);
+       if (result < VMCI_SUCCESS)
+               goto out;
+
+       result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+       if (result < VMCI_SUCCESS) {
+               qp_host_unregister_user_memory(entry->produce_q,
+                                              entry->consume_q);
+               goto out;
+       }
+
+       if (entry->state == VMCIQPB_CREATED_NO_MEM)
+               entry->state = VMCIQPB_CREATED_MEM;
+       else
+               entry->state = VMCIQPB_ATTACHED_MEM;
+
+       entry->vmci_page_files = true;
+
+       if (entry->state == VMCIQPB_ATTACHED_MEM) {
+               result =
+                   qp_notify_peer(true, handle, context_id, entry->create_id);
+               if (result < VMCI_SUCCESS) {
+                       pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+                               entry->create_id, entry->qp.handle.context,
+                               entry->qp.handle.resource);
+               }
+       }
+
+       result = VMCI_SUCCESS;
+ out:
+       mutex_unlock(&qp_broker_list.mutex);
+       return result;
+}
+
+/*
+ * Resets saved queue headers for the given QP broker
+ * entry. Should be used when guest memory becomes available
+ * again, or the guest detaches.
+ */
+static void qp_reset_saved_headers(struct qp_broker_entry *entry)
+{
+       entry->produce_q->saved_header = NULL;
+       entry->consume_q->saved_header = NULL;
+}
+
+/*
+ * The main entry point for detaching from a queue pair registered with the
+ * queue pair broker. If more than one endpoint is attached to the queue
+ * pair, the first endpoint will mainly decrement a reference count and
+ * generate a notification to its peer. The last endpoint will clean up
+ * the queue pair state registered with the broker.
+ *
+ * When a guest endpoint detaches, it will unmap and unregister the guest
+ * memory backing the queue pair. If the host is still attached, it will
+ * no longer be able to access the queue pair content.
+ *
+ * If the queue pair is already in a state where there is no memory
+ * registered for the queue pair (any *_NO_MEM state), it will transition to
+ * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest
+ * endpoint is the first of two endpoints to detach. If the host endpoint is
+ * the first out of two to detach, the queue pair will move to the
+ * VMCIQPB_SHUTDOWN_MEM state.
+ */
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
+{
+       struct qp_broker_entry *entry;
+       const u32 context_id = vmci_ctx_get_id(context);
+       u32 peer_id;
+       bool is_local = false;
+       int result;
+
+       if (vmci_handle_is_invalid(handle) || !context ||
+           context_id == VMCI_INVALID_ID) {
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       mutex_lock(&qp_broker_list.mutex);
+
+       if (!vmci_ctx_qp_exists(context, handle)) {
+               pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+                        context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       entry = qp_broker_handle_to_entry(handle);
+       if (!entry) {
+               pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
+                        context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       if (context_id != entry->create_id && context_id != entry->attach_id) {
+               result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+               goto out;
+       }
+
+       if (context_id == entry->create_id) {
+               peer_id = entry->attach_id;
+               entry->create_id = VMCI_INVALID_ID;
+       } else {
+               peer_id = entry->create_id;
+               entry->attach_id = VMCI_INVALID_ID;
+       }
+       entry->qp.ref_count--;
+
+       is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+       if (context_id != VMCI_HOST_CONTEXT_ID) {
+               bool headers_mapped;
+
+               /*
+                * Pre NOVMVM vmx'en may detach from a queue pair
+                * before setting the page store, and in that case
+                * there is no user memory to detach from. Also, more
+                * recent VMX'en may detach from a queue pair in the
+                * quiesced state.
+                */
+
+               qp_acquire_queue_mutex(entry->produce_q);
+               headers_mapped = entry->produce_q->q_header ||
+                   entry->consume_q->q_header;
+               if (QPBROKERSTATE_HAS_MEM(entry)) {
+                       result =
+                           qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
+                                                entry->produce_q,
+                                                entry->consume_q);
+                       if (result < VMCI_SUCCESS)
+                               pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+                                       handle.context, handle.resource,
+                                       result);
+
+                       if (entry->vmci_page_files)
+                               qp_host_unregister_user_memory(entry->produce_q,
+                                                              entry->
+                                                              consume_q);
+                       else
+                               qp_host_unregister_user_memory(entry->produce_q,
+                                                              entry->
+                                                              consume_q);
+
+               }
+
+               if (!headers_mapped)
+                       qp_reset_saved_headers(entry);
+
+               qp_release_queue_mutex(entry->produce_q);
+
+               if (!headers_mapped && entry->wakeup_cb)
+                       entry->wakeup_cb(entry->client_data);
+
+       } else {
+               if (entry->wakeup_cb) {
+                       entry->wakeup_cb = NULL;
+                       entry->client_data = NULL;
+               }
+       }
+
+       if (entry->qp.ref_count == 0) {
+               qp_list_remove_entry(&qp_broker_list, &entry->qp);
+
+               if (is_local)
+                       kfree(entry->local_mem);
+
+               qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+               qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
+               qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
+               /* Unlink from resource hash table and free callback */
+               vmci_resource_remove(&entry->resource);
+
+               kfree(entry);
+
+               vmci_ctx_qp_destroy(context, handle);
+       } else {
+               qp_notify_peer(false, handle, context_id, peer_id);
+               if (context_id == VMCI_HOST_CONTEXT_ID &&
+                   QPBROKERSTATE_HAS_MEM(entry)) {
+                       entry->state = VMCIQPB_SHUTDOWN_MEM;
+               } else {
+                       entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
+               }
+
+               if (!is_local)
+                       vmci_ctx_qp_destroy(context, handle);
+
+       }
+       result = VMCI_SUCCESS;
+ out:
+       mutex_unlock(&qp_broker_list.mutex);
+       return result;
+}
+
+/*
+ * Establishes the necessary mappings for a queue pair given a
+ * reference to the queue pair guest memory. This is usually
+ * called when a guest is unquiesced and the VMX is allowed to
+ * map guest memory once again.
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+                      struct vmci_ctx *context,
+                      u64 guest_mem)
+{
+       struct qp_broker_entry *entry;
+       const u32 context_id = vmci_ctx_get_id(context);
+       bool is_local = false;
+       int result;
+
+       if (vmci_handle_is_invalid(handle) || !context ||
+           context_id == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       mutex_lock(&qp_broker_list.mutex);
+
+       if (!vmci_ctx_qp_exists(context, handle)) {
+               pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+                        context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       entry = qp_broker_handle_to_entry(handle);
+       if (!entry) {
+               pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+                        context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       if (context_id != entry->create_id && context_id != entry->attach_id) {
+               result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+               goto out;
+       }
+
+       is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+       result = VMCI_SUCCESS;
+
+       if (context_id != VMCI_HOST_CONTEXT_ID) {
+               struct vmci_qp_page_store page_store;
+
+               page_store.pages = guest_mem;
+               page_store.len = QPE_NUM_PAGES(entry->qp);
+
+               qp_acquire_queue_mutex(entry->produce_q);
+               qp_reset_saved_headers(entry);
+               result =
+                   qp_host_register_user_memory(&page_store,
+                                                entry->produce_q,
+                                                entry->consume_q);
+               qp_release_queue_mutex(entry->produce_q);
+               if (result == VMCI_SUCCESS) {
+                       /* Move state from *_NO_MEM to *_MEM */
+
+                       entry->state++;
+
+                       if (entry->wakeup_cb)
+                               entry->wakeup_cb(entry->client_data);
+               }
+       }
+
+ out:
+       mutex_unlock(&qp_broker_list.mutex);
+       return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ * can't be accessed..
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+       int result;
+
+       if (entry->produce_q->saved_header != NULL &&
+           entry->consume_q->saved_header != NULL) {
+               /*
+                *  If the headers have already been saved, we don't need to do
+                *  it again, and we don't want to map in the headers
+                *  unnecessarily.
+                */
+
+               return VMCI_SUCCESS;
+       }
+
+       if (NULL == entry->produce_q->q_header ||
+           NULL == entry->consume_q->q_header) {
+               result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+               if (result < VMCI_SUCCESS)
+                       return result;
+       }
+
+       memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+              sizeof(entry->saved_produce_q));
+       entry->produce_q->saved_header = &entry->saved_produce_q;
+       memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+              sizeof(entry->saved_consume_q));
+       entry->consume_q->saved_header = &entry->saved_consume_q;
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced where access to guest memory should
+ * avoided.
+ */
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+                        struct vmci_ctx *context,
+                        u32 gid)
+{
+       struct qp_broker_entry *entry;
+       const u32 context_id = vmci_ctx_get_id(context);
+       bool is_local = false;
+       int result;
+
+       if (vmci_handle_is_invalid(handle) || !context ||
+           context_id == VMCI_INVALID_ID)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       mutex_lock(&qp_broker_list.mutex);
+
+       if (!vmci_ctx_qp_exists(context, handle)) {
+               pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+                        context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       entry = qp_broker_handle_to_entry(handle);
+       if (!entry) {
+               pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+                        context_id, handle.context, handle.resource);
+               result = VMCI_ERROR_NOT_FOUND;
+               goto out;
+       }
+
+       if (context_id != entry->create_id && context_id != entry->attach_id) {
+               result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+               goto out;
+       }
+
+       is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+       if (context_id != VMCI_HOST_CONTEXT_ID) {
+               qp_acquire_queue_mutex(entry->produce_q);
+               result = qp_save_headers(entry);
+               if (result < VMCI_SUCCESS)
+                       pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+                               handle.context, handle.resource, result);
+
+               qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
+
+               /*
+                * On hosted, when we unmap queue pairs, the VMX will also
+                * unmap the guest memory, so we invalidate the previously
+                * registered memory. If the queue pair is mapped again at a
+                * later point in time, we will need to reregister the user
+                * memory with a possibly new user VA.
+                */
+               qp_host_unregister_user_memory(entry->produce_q,
+                                              entry->consume_q);
+
+               /*
+                * Move state from *_MEM to *_NO_MEM.
+                */
+               entry->state--;
+
+               qp_release_queue_mutex(entry->produce_q);
+       }
+
+       result = VMCI_SUCCESS;
+
+ out:
+       mutex_unlock(&qp_broker_list.mutex);
+       return result;
+}
+
+/*
+ * Destroys all guest queue pair endpoints. If active guest queue
+ * pairs still exist, hypercalls to attempt detach from these
+ * queue pairs will be made. Any failure to detach is silently
+ * ignored.
+ */
+void vmci_qp_guest_endpoints_exit(void)
+{
+       struct qp_entry *entry;
+       struct qp_guest_endpoint *ep;
+
+       mutex_lock(&qp_guest_endpoints.mutex);
+
+       while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
+               ep = (struct qp_guest_endpoint *)entry;
+
+               /* Don't make a hypercall for local queue_pairs. */
+               if (!(entry->flags & VMCI_QPFLAG_LOCAL))
+                       qp_detatch_hypercall(entry->handle);
+
+               /* We cannot fail the exit, so let's reset ref_count. */
+               entry->ref_count = 0;
+               qp_list_remove_entry(&qp_guest_endpoints, entry);
+
+               qp_guest_endpoint_destroy(ep);
+       }
+
+       mutex_unlock(&qp_guest_endpoints.mutex);
+}
+
+/*
+ * Helper routine that will lock the queue pair before subsequent
+ * operations.
+ * Note: Non-blocking on the host side is currently only implemented in ESX.
+ * Since non-blocking isn't yet implemented on the host personality we
+ * have no reason to acquire a spin lock.  So to avoid the use of an
+ * unnecessary lock only acquire the mutex if we can block.
+ * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK.  Therefore
+ * we can use the same locking function for access to both the queue
+ * and the queue headers as it is the same logic.  Assert this behvior.
+ */
+static void qp_lock(const struct vmci_qp *qpair)
+{
+       if (vmci_can_block(qpair->flags))
+               qp_acquire_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * Helper routine that unlocks the queue pair after calling
+ * qp_lock.  Respects non-blocking and pinning flags.
+ */
+static void qp_unlock(const struct vmci_qp *qpair)
+{
+       if (vmci_can_block(qpair->flags))
+               qp_release_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * The queue headers may not be mapped at all times. If a queue is
+ * currently not mapped, it will be attempted to do so.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+                               struct vmci_queue *consume_q,
+                               bool can_block)
+{
+       int result;
+
+       if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+               if (can_block)
+                       result = qp_host_map_queues(produce_q, consume_q);
+               else
+                       result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
+
+               if (result < VMCI_SUCCESS)
+                       return (produce_q->saved_header &&
+                               consume_q->saved_header) ?
+                           VMCI_ERROR_QUEUEPAIR_NOT_READY :
+                           VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+       }
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+                               struct vmci_queue_header **produce_q_header,
+                               struct vmci_queue_header **consume_q_header)
+{
+       int result;
+
+       result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
+                                     vmci_can_block(qpair->flags));
+       if (result == VMCI_SUCCESS) {
+               *produce_q_header = qpair->produce_q->q_header;
+               *consume_q_header = qpair->consume_q->q_header;
+       } else if (qpair->produce_q->saved_header &&
+                  qpair->consume_q->saved_header) {
+               *produce_q_header = qpair->produce_q->saved_header;
+               *consume_q_header = qpair->consume_q->saved_header;
+               result = VMCI_SUCCESS;
+       }
+
+       return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+       struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+       qp_lock(qpair);
+       while (qpair->blocked > 0) {
+               qpair->blocked--;
+               qpair->generation++;
+               wake_up(&qpair->event);
+       }
+       qp_unlock(qpair);
+
+       return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access.  Returns true when thread is
+ * woken up after queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+       unsigned int generation;
+
+       if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
+               return false;
+
+       qpair->blocked++;
+       generation = qpair->generation;
+       qp_unlock(qpair);
+       wait_event(qpair->event, generation != qpair->generation);
+       qp_lock(qpair);
+
+       return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued.  Assumes the queue->mutex has been acquired.  Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occured when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available.  Otherwise, the number of bytes written to the queue is
+ * returned.  Updates the tail pointer of the produce queue.
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+                                struct vmci_queue *consume_q,
+                                const u64 produce_q_size,
+                                const void *buf,
+                                size_t buf_size,
+                                vmci_memcpy_to_queue_func memcpy_to_queue,
+                                bool can_block)
+{
+       s64 free_space;
+       u64 tail;
+       size_t written;
+       ssize_t result;
+
+       result = qp_map_queue_headers(produce_q, consume_q, can_block);
+       if (unlikely(result != VMCI_SUCCESS))
+               return result;
+
+       free_space = vmci_q_header_free_space(produce_q->q_header,
+                                             consume_q->q_header,
+                                             produce_q_size);
+       if (free_space == 0)
+               return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+       if (free_space < VMCI_SUCCESS)
+               return (ssize_t) free_space;
+
+       written = (size_t) (free_space > buf_size ? buf_size : free_space);
+       tail = vmci_q_header_producer_tail(produce_q->q_header);
+       if (likely(tail + written < produce_q_size)) {
+               result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+       } else {
+               /* Tail pointer wraps around. */
+
+               const size_t tmp = (size_t) (produce_q_size - tail);
+
+               result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+               if (result >= VMCI_SUCCESS)
+                       result = memcpy_to_queue(produce_q, 0, buf, tmp,
+                                                written - tmp);
+       }
+
+       if (result < VMCI_SUCCESS)
+               return result;
+
+       vmci_q_header_add_producer_tail(produce_q->q_header, written,
+                                       produce_q_size);
+       return written;
+}
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+                                struct vmci_queue *consume_q,
+                                const u64 consume_q_size,
+                                void *buf,
+                                size_t buf_size,
+                                vmci_memcpy_from_queue_func memcpy_from_queue,
+                                bool update_consumer,
+                                bool can_block)
+{
+       s64 buf_ready;
+       u64 head;
+       size_t read;
+       ssize_t result;
+
+       result = qp_map_queue_headers(produce_q, consume_q, can_block);
+       if (unlikely(result != VMCI_SUCCESS))
+               return result;
+
+       buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+                                           produce_q->q_header,
+                                           consume_q_size);
+       if (buf_ready == 0)
+               return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+       if (buf_ready < VMCI_SUCCESS)
+               return (ssize_t) buf_ready;
+
+       read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+       head = vmci_q_header_consumer_head(produce_q->q_header);
+       if (likely(head + read < consume_q_size)) {
+               result = memcpy_from_queue(buf, 0, consume_q, head, read);
+       } else {
+               /* Head pointer wraps around. */
+
+               const size_t tmp = (size_t) (consume_q_size - head);
+
+               result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+               if (result >= VMCI_SUCCESS)
+                       result = memcpy_from_queue(buf, tmp, consume_q, 0,
+                                                  read - tmp);
+
+       }
+
+       if (result < VMCI_SUCCESS)
+               return result;
+
+       if (update_consumer)
+               vmci_q_header_add_consumer_head(produce_q->q_header,
+                                               read, consume_q_size);
+
+       return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair:      Pointer for the new vmci_qp struct.
+ * @handle:     Handle to track the resource.
+ * @produce_qsize:      Desired size of the producer queue.
+ * @consume_qsize:      Desired size of the consumer queue.
+ * @peer:       ContextID of the peer.
+ * @flags:      VMCI flags.
+ * @priv_flags: VMCI priviledge flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue.  If an error occurs allocating the memory for the
+ * vmci_qp structure no attempt is made to attach.  If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+                    struct vmci_handle *handle,
+                    u64 produce_qsize,
+                    u64 consume_qsize,
+                    u32 peer,
+                    u32 flags,
+                    u32 priv_flags)
+{
+       struct vmci_qp *my_qpair;
+       int retval;
+       struct vmci_handle src = VMCI_INVALID_HANDLE;
+       struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+       enum vmci_route route;
+       vmci_event_release_cb wakeup_cb;
+       void *client_data;
+
+       /*
+        * Restrict the size of a queuepair.  The device already
+        * enforces a limit on the total amount of memory that can be
+        * allocated to queuepairs for a guest.  However, we try to
+        * allocate this memory before we make the queuepair
+        * allocation hypercall.  On Linux, we allocate each page
+        * separately, which means rather than fail, the guest will
+        * thrash while it tries to allocate, and will become
+        * increasingly unresponsive to the point where it appears to
+        * be hung.  So we place a limit on the size of an individual
+        * queuepair here, and leave the device to enforce the
+        * restriction on total queuepair memory.  (Note that this
+        * doesn't prevent all cases; a user with only this much
+        * physical memory could still get into trouble.)  The error
+        * used by the device is NO_RESOURCES, so use that here too.
+        */
+
+       if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+           produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+               return VMCI_ERROR_NO_RESOURCES;
+
+       retval = vmci_route(&src, &dst, false, &route);
+       if (retval < VMCI_SUCCESS)
+               route = vmci_guest_code_active() ?
+                   VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+       /* If NONBLOCK or PINNED is set, we better be the guest personality. */
+       if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
+           VMCI_ROUTE_AS_GUEST != route) {
+               pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+               return VMCI_ERROR_INVALID_ARGS;
+       }
+
+       /*
+        * Limit the size of pinned QPs and check sanity.
+        *
+        * Pinned pages implies non-blocking mode.  Mutexes aren't acquired
+        * when the NONBLOCK flag is set in qpair code; and also should not be
+        * acquired when the PINNED flagged is set.  Since pinning pages
+        * implies we want speed, it makes no sense not to have NONBLOCK
+        * set if PINNED is set.  Hence enforce this implication.
+        */
+       if (vmci_qp_pinned(flags)) {
+               if (vmci_can_block(flags)) {
+                       pr_err("Attempted to enable pinning w/o non-blocking");
+                       return VMCI_ERROR_INVALID_ARGS;
+               }
+
+               if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
+                       return VMCI_ERROR_NO_RESOURCES;
+       }
+
+       my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+       if (!my_qpair)
+               return VMCI_ERROR_NO_MEM;
+
+       my_qpair->produce_q_size = produce_qsize;
+       my_qpair->consume_q_size = consume_qsize;
+       my_qpair->peer = peer;
+       my_qpair->flags = flags;
+       my_qpair->priv_flags = priv_flags;
+
+       wakeup_cb = NULL;
+       client_data = NULL;
+
+       if (VMCI_ROUTE_AS_HOST == route) {
+               my_qpair->guest_endpoint = false;
+               if (!(flags & VMCI_QPFLAG_LOCAL)) {
+                       my_qpair->blocked = 0;
+                       my_qpair->generation = 0;
+                       init_waitqueue_head(&my_qpair->event);
+                       wakeup_cb = qp_wakeup_cb;
+                       client_data = (void *)my_qpair;
+               }
+       } else {
+               my_qpair->guest_endpoint = true;
+       }
+
+       retval = vmci_qp_alloc(handle,
+                              &my_qpair->produce_q,
+                              my_qpair->produce_q_size,
+                              &my_qpair->consume_q,
+                              my_qpair->consume_q_size,
+                              my_qpair->peer,
+                              my_qpair->flags,
+                              my_qpair->priv_flags,
+                              my_qpair->guest_endpoint,
+                              wakeup_cb, client_data);
+
+       if (retval < VMCI_SUCCESS) {
+               kfree(my_qpair);
+               return retval;
+       }
+
+       *qpair = my_qpair;
+       my_qpair->handle = *handle;
+
+       return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
+
+/*
+ * vmci_qpair_detach() - Detatches the client from a queue pair.
+ * @qpair:      Reference of a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
+ */
+int vmci_qpair_detach(struct vmci_qp **qpair)
+{
+       int result;
+       struct vmci_qp *old_qpair;
+
+       if (!qpair || !(*qpair))
+               return VMCI_ERROR_INVALID_ARGS;
+
+       old_qpair = *qpair;
+       result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
+
+       /*
+        * The guest can fail to detach for a number of reasons, and
+        * if it does so, it will cleanup the entry (if there is one).
+        * The host can fail too, but it won't cleanup the entry
+        * immediately, it will do that later when the context is
+        * freed.  Either way, we need to release the qpair struct
+        * here; there isn't much the caller can do, and we don't want
+        * to leak.
+        */
+
+       memset(old_qpair, 0, sizeof(*old_qpair));
+       old_qpair->handle = VMCI_INVALID_HANDLE;
+       old_qpair->peer = VMCI_INVALID_ID;
+       kfree(old_qpair);
+       *qpair = NULL;
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_detach);
+
+/*
+ * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
+ * @qpair:      Pointer to the queue pair struct.
+ * @producer_tail:      Reference used for storing producer tail index.
+ * @consumer_head:      Reference used for storing the consumer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of the view of the caller as the producer.
+ */
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+                                  u64 *producer_tail,
+                                  u64 *consumer_head)
+{
+       struct vmci_queue_header *produce_q_header;
+       struct vmci_queue_header *consume_q_header;
+       int result;
+
+       if (!qpair)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+       result =
+           qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+       if (result == VMCI_SUCCESS)
+               vmci_q_header_get_pointers(produce_q_header, consume_q_header,
+                                          producer_tail, consumer_head);
+       qp_unlock(qpair);
+
+       if (result == VMCI_SUCCESS &&
+           ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
+            (consumer_head && *consumer_head >= qpair->produce_q_size)))
+               return VMCI_ERROR_INVALID_SIZE;
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
+
+/*
+ * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the comsumer.
+ * @qpair:      Pointer to the queue pair struct.
+ * @consumer_tail:      Reference used for storing consumer tail index.
+ * @producer_head:      Reference used for storing the producer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of the view of the caller as the consumer.
+ */
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+                                  u64 *consumer_tail,
+                                  u64 *producer_head)
+{
+       struct vmci_queue_header *produce_q_header;
+       struct vmci_queue_header *consume_q_header;
+       int result;
+
+       if (!qpair)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+       result =
+           qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+       if (result == VMCI_SUCCESS)
+               vmci_q_header_get_pointers(consume_q_header, produce_q_header,
+                                          consumer_tail, producer_head);
+       qp_unlock(qpair);
+
+       if (result == VMCI_SUCCESS &&
+           ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
+            (producer_head && *producer_head >= qpair->consume_q_size)))
+               return VMCI_ERROR_INVALID_SIZE;
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
+
+/*
+ * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of the view of the caller as
+ * the producer which is the common case.  Returns < 0 if err, else
+ * available bytes into which data can be enqueued if > 0.
+ */
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
+{
+       struct vmci_queue_header *produce_q_header;
+       struct vmci_queue_header *consume_q_header;
+       s64 result;
+
+       if (!qpair)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+       result =
+           qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+       if (result == VMCI_SUCCESS)
+               result = vmci_q_header_free_space(produce_q_header,
+                                                 consume_q_header,
+                                                 qpair->produce_q_size);
+       else
+               result = 0;
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
+
+/*
+ * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of the view of the caller as
+ * the consumer which is not the common case.  Returns < 0 if err, else
+ * available bytes into which data can be enqueued if > 0.
+ */
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
+{
+       struct vmci_queue_header *produce_q_header;
+       struct vmci_queue_header *consume_q_header;
+       s64 result;
+
+       if (!qpair)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+       result =
+           qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+       if (result == VMCI_SUCCESS)
+               result = vmci_q_header_free_space(consume_q_header,
+                                                 produce_q_header,
+                                                 qpair->consume_q_size);
+       else
+               result = 0;
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
+
+/*
+ * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
+ * producer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of the view of the
+ * caller as the producer which is not the common case.  Returns < 0 if err,
+ * else available bytes that may be read.
+ */
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
+{
+       struct vmci_queue_header *produce_q_header;
+       struct vmci_queue_header *consume_q_header;
+       s64 result;
+
+       if (!qpair)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+       result =
+           qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+       if (result == VMCI_SUCCESS)
+               result = vmci_q_header_buf_ready(produce_q_header,
+                                                consume_q_header,
+                                                qpair->produce_q_size);
+       else
+               result = 0;
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
+
+/*
+ * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
+ * consumer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of the view of the
+ * caller as the consumer which is the normal case.  Returns < 0 if err,
+ * else available bytes that may be read.
+ */
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
+{
+       struct vmci_queue_header *produce_q_header;
+       struct vmci_queue_header *consume_q_header;
+       s64 result;
+
+       if (!qpair)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+       result =
+           qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+       if (result == VMCI_SUCCESS)
+               result = vmci_q_header_buf_ready(consume_q_header,
+                                                produce_q_header,
+                                                qpair->consume_q_size);
+       else
+               result = 0;
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
+
+/*
+ * vmci_qpair_enqueue() - Throw data on the queue.
+ * @qpair:      Pointer to the queue pair struct.
+ * @buf:        Pointer to buffer containing data
+ * @buf_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * Returns number of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+                          const void *buf,
+                          size_t buf_size,
+                          int buf_type)
+{
+       ssize_t result;
+
+       if (!qpair || !buf)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+
+       do {
+               result = qp_enqueue_locked(qpair->produce_q,
+                                          qpair->consume_q,
+                                          qpair->produce_q_size,
+                                          buf, buf_size,
+                                          qp_memcpy_to_queue,
+                                          vmci_can_block(qpair->flags));
+
+               if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+                   !qp_wait_for_ready_queue(qpair))
+                       result = VMCI_ERROR_WOULD_BLOCK;
+
+       } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
+
+/*
+ * vmci_qpair_dequeue() - Get data from the queue.
+ * @qpair:      Pointer to the queue pair struct.
+ * @buf:        Pointer to buffer for the data
+ * @buf_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+                          void *buf,
+                          size_t buf_size,
+                          int buf_type)
+{
+       ssize_t result;
+
+       if (!qpair || !buf)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+
+       do {
+               result = qp_dequeue_locked(qpair->produce_q,
+                                          qpair->consume_q,
+                                          qpair->consume_q_size,
+                                          buf, buf_size,
+                                          qp_memcpy_from_queue, true,
+                                          vmci_can_block(qpair->flags));
+
+               if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+                   !qp_wait_for_ready_queue(qpair))
+                       result = VMCI_ERROR_WOULD_BLOCK;
+
+       } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
+
+/*
+ * vmci_qpair_peek() - Peek at the data in the queue.
+ * @qpair:      Pointer to the queue pair struct.
+ * @buf:        Pointer to buffer for the data
+ * @buf_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue.  (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+                       void *buf,
+                       size_t buf_size,
+                       int buf_type)
+{
+       ssize_t result;
+
+       if (!qpair || !buf)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+
+       do {
+               result = qp_dequeue_locked(qpair->produce_q,
+                                          qpair->consume_q,
+                                          qpair->consume_q_size,
+                                          buf, buf_size,
+                                          qp_memcpy_from_queue, false,
+                                          vmci_can_block(qpair->flags));
+
+               if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+                   !qp_wait_for_ready_queue(qpair))
+                       result = VMCI_ERROR_WOULD_BLOCK;
+
+       } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peek);
+
+/*
+ * vmci_qpair_enquev() - Throw data on the queue using iov.
+ * @qpair:      Pointer to the queue pair struct.
+ * @iov:        Pointer to buffer containing data
+ * @iov_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+                         void *iov,
+                         size_t iov_size,
+                         int buf_type)
+{
+       ssize_t result;
+
+       if (!qpair || !iov)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+
+       do {
+               result = qp_enqueue_locked(qpair->produce_q,
+                                          qpair->consume_q,
+                                          qpair->produce_q_size,
+                                          iov, iov_size,
+                                          qp_memcpy_to_queue_iov,
+                                          vmci_can_block(qpair->flags));
+
+               if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+                   !qp_wait_for_ready_queue(qpair))
+                       result = VMCI_ERROR_WOULD_BLOCK;
+
+       } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
+
+/*
+ * vmci_qpair_dequev() - Get data from the queue using iov.
+ * @qpair:      Pointer to the queue pair struct.
+ * @iov:        Pointer to buffer for the data
+ * @iov_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+                         void *iov,
+                         size_t iov_size,
+                         int buf_type)
+{
+       ssize_t result;
+
+       if (!qpair || !iov)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+
+       do {
+               result = qp_dequeue_locked(qpair->produce_q,
+                                          qpair->consume_q,
+                                          qpair->consume_q_size,
+                                          iov, iov_size,
+                                          qp_memcpy_from_queue_iov,
+                                          true, vmci_can_block(qpair->flags));
+
+               if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+                   !qp_wait_for_ready_queue(qpair))
+                       result = VMCI_ERROR_WOULD_BLOCK;
+
+       } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+       qp_unlock(qpair);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
+
+/*
+ * vmci_qpair_peekv() - Peek at the data in the queue using iov.
+ * @qpair:      Pointer to the queue pair struct.
+ * @iov:        Pointer to buffer for the data
+ * @iov_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue.  (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
+                        void *iov,
+                        size_t iov_size,
+                        int buf_type)
+{
+       ssize_t result;
+
+       if (!qpair || !iov)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       qp_lock(qpair);
+
+       do {
+               result = qp_dequeue_locked(qpair->produce_q,
+                                          qpair->consume_q,
+                                          qpair->consume_q_size,
+                                          iov, iov_size,
+                                          qp_memcpy_from_queue_iov,
+                                          false, vmci_can_block(qpair->flags));
+
+               if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+                   !qp_wait_for_ready_queue(qpair))
+                       result = VMCI_ERROR_WOULD_BLOCK;
+
+       } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+       qp_unlock(qpair);
+       return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
new file mode 100644 (file)
index 0000000..58c6959
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_QUEUE_PAIR_H_
+#define _VMCI_QUEUE_PAIR_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_event_release_cb) (void *client_data);
+
+/* Guest device port I/O. */
+struct ppn_set {
+       u64 num_produce_pages;
+       u64 num_consume_pages;
+       u32 *produce_ppns;
+       u32 *consume_ppns;
+       bool initialized;
+};
+
+/* VMCIqueue_pairAllocInfo */
+struct vmci_qp_alloc_info {
+       struct vmci_handle handle;
+       u32 peer;
+       u32 flags;
+       u64 produce_size;
+       u64 consume_size;
+       u64 ppn_va;     /* Start VA of queue pair PPNs. */
+       u64 num_ppns;
+       s32 result;
+       u32 version;
+};
+
+/* VMCIqueue_pairSetVAInfo */
+struct vmci_qp_set_va_info {
+       struct vmci_handle handle;
+       u64 va;         /* Start VA of queue pair PPNs. */
+       u64 num_ppns;
+       u32 version;
+       s32 result;
+};
+
+/*
+ * For backwards compatibility, here is a version of the
+ * VMCIqueue_pairPageFileInfo before host support end-points was added.
+ * Note that the current version of that structure requires VMX to
+ * pass down the VA of the mapped file.  Before host support was added
+ * there was nothing of the sort.  So, when the driver sees the ioctl
+ * with a parameter that is the sizeof
+ * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version
+ * of VMX running can't attach to host end points because it doesn't
+ * provide the VA of the mapped files.
+ *
+ * The Linux driver doesn't get an indication of the size of the
+ * structure passed down from user space.  So, to fix a long standing
+ * but unfiled bug, the _pad field has been renamed to version.
+ * Existing versions of VMX always initialize the PageFileInfo
+ * structure so that _pad, er, version is set to 0.
+ *
+ * A version value of 1 indicates that the size of the structure has
+ * been increased to include two UVA's: produce_uva and consume_uva.
+ * These UVA's are of the mmap()'d queue contents backing files.
+ *
+ * In addition, if when VMX is sending down the
+ * VMCIqueue_pairPageFileInfo structure it gets an error then it will
+ * try again with the _NoHostQP version of the file to see if an older
+ * VMCI kernel module is running.
+ */
+
+/* VMCIqueue_pairPageFileInfo */
+struct vmci_qp_page_file_info {
+       struct vmci_handle handle;
+       u64 produce_page_file;    /* User VA. */
+       u64 consume_page_file;    /* User VA. */
+       u64 produce_page_file_size;  /* Size of the file name array. */
+       u64 consume_page_file_size;  /* Size of the file name array. */
+       s32 result;
+       u32 version;    /* Was _pad. */
+       u64 produce_va; /* User VA of the mapped file. */
+       u64 consume_va; /* User VA of the mapped file. */
+};
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+       struct vmci_handle handle;
+       s32 result;
+       u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+       /* Reference to pages backing the queue pair. */
+       u64 pages;
+       /* Length of pageList/virtual addres range (in pages). */
+       u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence, queue pairs) per transaction model between a
+ * pair of end points, A & B.  One queue is used by end point A to transmit
+ * commands and responses to B.  The other queue is used by B to transmit
+ * commands and responses.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure.  It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages.  See
+ * vmciKernelIf.c for each platform for its definition.
+ */
+struct vmci_queue {
+       struct vmci_queue_header *q_header;
+       struct vmci_queue_header *saved_header;
+       struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is wellformed. false otherwise.
+ */
+static inline bool
+VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
+{
+       return page_store->len >= 2;
+}
+
+/*
+ * Helper function to check if the non-blocking flag
+ * is set for a given queue pair.
+ */
+static inline bool vmci_can_block(u32 flags)
+{
+       return !(flags & VMCI_QPFLAG_NONBLOCK);
+}
+
+/*
+ * Helper function to check if the queue pair is pinned
+ * into memory.
+ */
+static inline bool vmci_qp_pinned(u32 flags)
+{
+       return flags & VMCI_QPFLAG_PINNED;
+}
+
+void vmci_qp_broker_exit(void);
+int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
+                        u32 flags, u32 priv_flags,
+                        u64 produce_size, u64 consume_size,
+                        struct vmci_qp_page_store *page_store,
+                        struct vmci_ctx *context);
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+                                 u64 produce_uva, u64 consume_uva,
+                                 struct vmci_ctx *context);
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
+
+void vmci_qp_guest_endpoints_exit(void);
+
+int vmci_qp_alloc(struct vmci_handle *handle,
+                 struct vmci_queue **produce_q, u64 produce_size,
+                 struct vmci_queue **consume_q, u64 consume_size,
+                 u32 peer, u32 flags, u32 priv_flags,
+                 bool guest_endpoint, vmci_event_release_cb wakeup_cb,
+                 void *client_data);
+int vmci_qp_broker_map(struct vmci_handle handle,
+                      struct vmci_ctx *context, u64 guest_mem);
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+                        struct vmci_ctx *context, u32 gid);
+
+#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644 (file)
index 0000000..a196f84
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/rculist.h>
+
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+
+
+#define VMCI_RESOURCE_HASH_BITS         7
+#define VMCI_RESOURCE_HASH_BUCKETS      (1 << VMCI_RESOURCE_HASH_BITS)
+
+struct vmci_hash_table {
+       spinlock_t lock;
+       struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
+};
+
+static struct vmci_hash_table vmci_resource_table = {
+       .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
+};
+
+static unsigned int vmci_resource_hash(struct vmci_handle handle)
+{
+       return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
+}
+
+/*
+ * Gets a resource (if one exists) matching given handle from the hash table.
+ */
+static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
+                                                 enum vmci_resource_type type)
+{
+       struct vmci_resource *r, *resource = NULL;
+       struct hlist_node *node;
+       unsigned int idx = vmci_resource_hash(handle);
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(r, node,
+                                &vmci_resource_table.entries[idx], node) {
+               u32 cid = r->handle.context;
+               u32 rid = r->handle.resource;
+
+               if (r->type == type &&
+                   rid == handle.resource &&
+                   (cid == handle.context || cid == VMCI_INVALID_ID)) {
+                       resource = r;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return resource;
+}
+
+/*
+ * Find an unused resource ID and return it. The first
+ * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
+ * its value + 1.
+ * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
+ */
+static u32 vmci_resource_find_id(u32 context_id,
+                                enum vmci_resource_type resource_type)
+{
+       static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+       u32 old_rid = resource_id;
+       u32 current_rid;
+
+       /*
+        * Generate a unique resource ID.  Keep on trying until we wrap around
+        * in the RID space.
+        */
+       do {
+               struct vmci_handle handle;
+
+               current_rid = resource_id;
+               resource_id++;
+               if (unlikely(resource_id == VMCI_INVALID_ID)) {
+                       /* Skip the reserved rids. */
+                       resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+               }
+
+               handle = vmci_make_handle(context_id, current_rid);
+               if (!vmci_resource_lookup(handle, resource_type))
+                       return current_rid;
+       } while (resource_id != old_rid);
+
+       return VMCI_INVALID_ID;
+}
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+                     enum vmci_resource_type resource_type,
+                     struct vmci_handle handle)
+
+{
+       unsigned int idx;
+       int result;
+
+       spin_lock(&vmci_resource_table.lock);
+
+       if (handle.resource == VMCI_INVALID_ID) {
+               handle.resource = vmci_resource_find_id(handle.context,
+                       resource_type);
+               if (handle.resource == VMCI_INVALID_ID) {
+                       result = VMCI_ERROR_NO_HANDLE;
+                       goto out;
+               }
+       } else if (vmci_resource_lookup(handle, resource_type)) {
+               result = VMCI_ERROR_ALREADY_EXISTS;
+               goto out;
+       }
+
+       resource->handle = handle;
+       resource->type = resource_type;
+       INIT_HLIST_NODE(&resource->node);
+       kref_init(&resource->kref);
+       init_completion(&resource->done);
+
+       idx = vmci_resource_hash(resource->handle);
+       hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
+
+       result = VMCI_SUCCESS;
+
+out:
+       spin_unlock(&vmci_resource_table.lock);
+       return result;
+}
+
+void vmci_resource_remove(struct vmci_resource *resource)
+{
+       struct vmci_handle handle = resource->handle;
+       unsigned int idx = vmci_resource_hash(handle);
+       struct vmci_resource *r;
+       struct hlist_node *node;
+
+       /* Remove resource from hash table. */
+       spin_lock(&vmci_resource_table.lock);
+
+       hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+               if (vmci_handle_is_equal(r->handle, resource->handle)) {
+                       hlist_del_init_rcu(&r->node);
+                       break;
+               }
+       }
+
+       spin_unlock(&vmci_resource_table.lock);
+       synchronize_rcu();
+
+       vmci_resource_put(resource);
+       wait_for_completion(&resource->done);
+}
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+                       enum vmci_resource_type resource_type)
+{
+       struct vmci_resource *r, *resource = NULL;
+
+       rcu_read_lock();
+
+       r = vmci_resource_lookup(resource_handle, resource_type);
+       if (r &&
+           (resource_type == r->type ||
+            resource_type == VMCI_RESOURCE_TYPE_ANY)) {
+               resource = vmci_resource_get(r);
+       }
+
+       rcu_read_unlock();
+
+       return resource;
+}
+
+/*
+ * Get a reference to given resource.
+ */
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
+{
+       kref_get(&resource->kref);
+
+       return resource;
+}
+
+static void vmci_release_resource(struct kref *kref)
+{
+       struct vmci_resource *resource =
+               container_of(kref, struct vmci_resource, kref);
+
+       /* Verify the resource has been unlinked from hash table */
+       WARN_ON(!hlist_unhashed(&resource->node));
+
+       /* Signal that container of this resource can now be destroyed */
+       complete(&resource->done);
+}
+
+/*
+ * Resource's release function will get called if last reference.
+ * If it is the last reference, then we are sure that nobody else
+ * can increment the count again (it's gone from the resource hash
+ * table), so there's no need for locking here.
+ */
+int vmci_resource_put(struct vmci_resource *resource)
+{
+       /*
+        * We propagate the information back to caller in case it wants to know
+        * whether entry was freed.
+        */
+       return kref_put(&resource->kref, vmci_release_resource) ?
+               VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
+}
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
+{
+       return resource->handle;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h
new file mode 100644 (file)
index 0000000..9190cd2
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_RESOURCE_H_
+#define _VMCI_RESOURCE_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+
+enum vmci_resource_type {
+       VMCI_RESOURCE_TYPE_ANY,
+       VMCI_RESOURCE_TYPE_API,
+       VMCI_RESOURCE_TYPE_GROUP,
+       VMCI_RESOURCE_TYPE_DATAGRAM,
+       VMCI_RESOURCE_TYPE_DOORBELL,
+       VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+       VMCI_RESOURCE_TYPE_QPAIR_HOST
+};
+
+struct vmci_resource {
+       struct vmci_handle handle;
+       enum vmci_resource_type type;
+       struct hlist_node node;
+       struct kref kref;
+       struct completion done;
+};
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+                     enum vmci_resource_type resource_type,
+                     struct vmci_handle handle);
+
+void vmci_resource_remove(struct vmci_resource *resource);
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+                       enum vmci_resource_type resource_type);
+
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
+int vmci_resource_put(struct vmci_resource *resource);
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
+
+#endif /* _VMCI_RESOURCE_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c
new file mode 100644 (file)
index 0000000..9109065
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+/*
+ * Make a routing decision for the given source and destination handles.
+ * This will try to determine the route using the handles and the available
+ * devices.  Will set the source context if it is invalid.
+ */
+int vmci_route(struct vmci_handle *src,
+              const struct vmci_handle *dst,
+              bool from_guest,
+              enum vmci_route *route)
+{
+       bool has_host_device = vmci_host_code_active();
+       bool has_guest_device = vmci_guest_code_active();
+
+       *route = VMCI_ROUTE_NONE;
+
+       /*
+        * "from_guest" is only ever set to true by
+        * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
+        * which comes from the VMX, so we know it is coming from a
+        * guest.
+        *
+        * To avoid inconsistencies, test these once.  We will test
+        * them again when we do the actual send to ensure that we do
+        * not touch a non-existent device.
+        */
+
+       /* Must have a valid destination context. */
+       if (VMCI_INVALID_ID == dst->context)
+               return VMCI_ERROR_INVALID_ARGS;
+
+       /* Anywhere to hypervisor. */
+       if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
+
+               /*
+                * If this message already came from a guest then we
+                * cannot send it to the hypervisor.  It must come
+                * from a local client.
+                */
+               if (from_guest)
+                       return VMCI_ERROR_DST_UNREACHABLE;
+
+               /*
+                * We must be acting as a guest in order to send to
+                * the hypervisor.
+                */
+               if (!has_guest_device)
+                       return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+               /* And we cannot send if the source is the host context. */
+               if (VMCI_HOST_CONTEXT_ID == src->context)
+                       return VMCI_ERROR_INVALID_ARGS;
+
+               /*
+                * If the client passed the ANON source handle then
+                * respect it (both context and resource are invalid).
+                * However, if they passed only an invalid context,
+                * then they probably mean ANY, in which case we
+                * should set the real context here before passing it
+                * down.
+                */
+               if (VMCI_INVALID_ID == src->context &&
+                   VMCI_INVALID_ID != src->resource)
+                       src->context = vmci_get_context_id();
+
+               /* Send from local client down to the hypervisor. */
+               *route = VMCI_ROUTE_AS_GUEST;
+               return VMCI_SUCCESS;
+       }
+
+       /* Anywhere to local client on host. */
+       if (VMCI_HOST_CONTEXT_ID == dst->context) {
+               /*
+                * If it is not from a guest but we are acting as a
+                * guest, then we need to send it down to the host.
+                * Note that if we are also acting as a host then this
+                * will prevent us from sending from local client to
+                * local client, but we accept that restriction as a
+                * way to remove any ambiguity from the host context.
+                */
+               if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
+                       /*
+                        * If the hypervisor is the source, this is
+                        * host local communication. The hypervisor
+                        * may send vmci event datagrams to the host
+                        * itself, but it will never send datagrams to
+                        * an "outer host" through the guest device.
+                        */
+
+                       if (has_host_device) {
+                               *route = VMCI_ROUTE_AS_HOST;
+                               return VMCI_SUCCESS;
+                       } else {
+                               return VMCI_ERROR_DEVICE_NOT_FOUND;
+                       }
+               }
+
+               if (!from_guest && has_guest_device) {
+                       /* If no source context then use the current. */
+                       if (VMCI_INVALID_ID == src->context)
+                               src->context = vmci_get_context_id();
+
+                       /* Send it from local client down to the host. */
+                       *route = VMCI_ROUTE_AS_GUEST;
+                       return VMCI_SUCCESS;
+               }
+
+               /*
+                * Otherwise we already received it from a guest and
+                * it is destined for a local client on this host, or
+                * it is from another local client on this host.  We
+                * must be acting as a host to service it.
+                */
+               if (!has_host_device)
+                       return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+               if (VMCI_INVALID_ID == src->context) {
+                       /*
+                        * If it came from a guest then it must have a
+                        * valid context.  Otherwise we can use the
+                        * host context.
+                        */
+                       if (from_guest)
+                               return VMCI_ERROR_INVALID_ARGS;
+
+                       src->context = VMCI_HOST_CONTEXT_ID;
+               }
+
+               /* Route to local client. */
+               *route = VMCI_ROUTE_AS_HOST;
+               return VMCI_SUCCESS;
+       }
+
+       /*
+        * If we are acting as a host then this might be destined for
+        * a guest.
+        */
+       if (has_host_device) {
+               /* It will have a context if it is meant for a guest. */
+               if (vmci_ctx_exists(dst->context)) {
+                       if (VMCI_INVALID_ID == src->context) {
+                               /*
+                                * If it came from a guest then it
+                                * must have a valid context.
+                                * Otherwise we can use the host
+                                * context.
+                                */
+
+                               if (from_guest)
+                                       return VMCI_ERROR_INVALID_ARGS;
+
+                               src->context = VMCI_HOST_CONTEXT_ID;
+                       } else if (VMCI_CONTEXT_IS_VM(src->context) &&
+                                  src->context != dst->context) {
+                               /*
+                                * VM to VM communication is not
+                                * allowed. Since we catch all
+                                * communication destined for the host
+                                * above, this must be destined for a
+                                * VM since there is a valid context.
+                                */
+
+                               return VMCI_ERROR_DST_UNREACHABLE;
+                       }
+
+                       /* Pass it up to the guest. */
+                       *route = VMCI_ROUTE_AS_HOST;
+                       return VMCI_SUCCESS;
+               } else if (!has_guest_device) {
+                       /*
+                        * The host is attempting to reach a CID
+                        * without an active context, and we can't
+                        * send it down, since we have no guest
+                        * device.
+                        */
+
+                       return VMCI_ERROR_DST_UNREACHABLE;
+               }
+       }
+
+       /*
+        * We must be a guest trying to send to another guest, which means
+        * we need to send it down to the host. We do not filter out VM to
+        * VM communication here, since we want to be able to use the guest
+        * driver on older versions that do support VM to VM communication.
+        */
+       if (!has_guest_device) {
+               /*
+                * Ending up here means we have neither guest nor host
+                * device.
+                */
+               return VMCI_ERROR_DEVICE_NOT_FOUND;
+       }
+
+       /* If no source context then use the current context. */
+       if (VMCI_INVALID_ID == src->context)
+               src->context = vmci_get_context_id();
+
+       /*
+        * Send it from local client down to the host, which will
+        * route it to the other guest for us.
+        */
+       *route = VMCI_ROUTE_AS_GUEST;
+       return VMCI_SUCCESS;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h
new file mode 100644 (file)
index 0000000..3b30e82
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_ROUTE_H_
+#define _VMCI_ROUTE_H_
+
+#include <linux/vmw_vmci_defs.h>
+
+enum vmci_route {
+       VMCI_ROUTE_NONE,
+       VMCI_ROUTE_AS_HOST,
+       VMCI_ROUTE_AS_GUEST,
+};
+
+int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
+              bool from_guest, enum vmci_route *route);
+
+#endif /* _VMCI_ROUTE_H_ */
index 8d13c65..3004127 100644 (file)
@@ -461,7 +461,7 @@ config MMC_SDHI
 
 config MMC_CB710
        tristate "ENE CB710 MMC/SD Interface support"
-       depends on PCI
+       depends on PCI && GENERIC_HARDIRQS
        select CB710_CORE
        help
          This option enables support for MMC/SD part of ENE CB710/720 Flash
index de4c20b..f8dd361 100644 (file)
@@ -50,8 +50,6 @@ struct mvsd_host {
        struct timer_list timer;
        struct mmc_host *mmc;
        struct device *dev;
-       struct resource *res;
-       int irq;
        struct clk *clk;
        int gpio_card_detect;
        int gpio_write_protect;
@@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)
        if (!r || irq < 0 || !mvsd_data)
                return -ENXIO;
 
-       r = request_mem_region(r->start, SZ_1K, DRIVER_NAME);
-       if (!r)
-               return -EBUSY;
-
        mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
@@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->dev = &pdev->dev;
-       host->res = r;
        host->base_clock = mvsd_data->clock / 2;
+       host->clk = ERR_PTR(-EINVAL);
 
        mmc->ops = &mvsd_ops;
 
@@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
 
        spin_lock_init(&host->lock);
 
-       host->base = ioremap(r->start, SZ_4K);
+       host->base = devm_request_and_ioremap(&pdev->dev, r);
        if (!host->base) {
                ret = -ENOMEM;
                goto out;
@@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)
 
        mvsd_power_down(host);
 
-       ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host);
+       ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
        if (ret) {
                pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
                goto out;
-       } else
-               host->irq = irq;
+       }
 
        /* Not all platforms can gate the clock, so it is not
           an error if the clock does not exists. */
-       host->clk = clk_get(&pdev->dev, NULL);
-       if (!IS_ERR(host->clk)) {
+       host->clk = devm_clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(host->clk))
                clk_prepare_enable(host->clk);
-       }
 
        if (mvsd_data->gpio_card_detect) {
-               ret = gpio_request(mvsd_data->gpio_card_detect,
-                                  DRIVER_NAME " cd");
+               ret = devm_gpio_request_one(&pdev->dev,
+                                           mvsd_data->gpio_card_detect,
+                                           GPIOF_IN, DRIVER_NAME " cd");
                if (ret == 0) {
-                       gpio_direction_input(mvsd_data->gpio_card_detect);
                        irq = gpio_to_irq(mvsd_data->gpio_card_detect);
-                       ret = request_irq(irq, mvsd_card_detect_irq,
-                                         IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING,
-                                         DRIVER_NAME " cd", host);
+                       ret = devm_request_irq(&pdev->dev, irq,
+                                              mvsd_card_detect_irq,
+                                              IRQ_TYPE_EDGE_RISING |
+                                              IRQ_TYPE_EDGE_FALLING,
+                                              DRIVER_NAME " cd", host);
                        if (ret == 0)
                                host->gpio_card_detect =
                                        mvsd_data->gpio_card_detect;
                        else
-                               gpio_free(mvsd_data->gpio_card_detect);
+                               devm_gpio_free(&pdev->dev,
+                                              mvsd_data->gpio_card_detect);
                }
        }
        if (!host->gpio_card_detect)
                mmc->caps |= MMC_CAP_NEEDS_POLL;
 
        if (mvsd_data->gpio_write_protect) {
-               ret = gpio_request(mvsd_data->gpio_write_protect,
-                                  DRIVER_NAME " wp");
+               ret = devm_gpio_request_one(&pdev->dev,
+                                           mvsd_data->gpio_write_protect,
+                                           GPIOF_IN, DRIVER_NAME " wp");
                if (ret == 0) {
-                       gpio_direction_input(mvsd_data->gpio_write_protect);
                        host->gpio_write_protect =
                                mvsd_data->gpio_write_protect;
                }
@@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
        return 0;
 
 out:
-       if (host) {
-               if (host->irq)
-                       free_irq(host->irq, host);
-               if (host->gpio_card_detect) {
-                       free_irq(gpio_to_irq(host->gpio_card_detect), host);
-                       gpio_free(host->gpio_card_detect);
-               }
-               if (host->gpio_write_protect)
-                       gpio_free(host->gpio_write_protect);
-               if (host->base)
-                       iounmap(host->base);
-       }
-       if (r)
-               release_resource(r);
-       if (mmc)
-               if (!IS_ERR_OR_NULL(host->clk)) {
+       if (mmc) {
+               if (!IS_ERR(host->clk))
                        clk_disable_unprepare(host->clk);
-                       clk_put(host->clk);
-               }
                mmc_free_host(mmc);
+       }
 
        return ret;
 }
@@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)
 {
        struct mmc_host *mmc = platform_get_drvdata(pdev);
 
-       if (mmc) {
-               struct mvsd_host *host = mmc_priv(mmc);
+       struct mvsd_host *host = mmc_priv(mmc);
 
-               if (host->gpio_card_detect) {
-                       free_irq(gpio_to_irq(host->gpio_card_detect), host);
-                       gpio_free(host->gpio_card_detect);
-               }
-               mmc_remove_host(mmc);
-               free_irq(host->irq, host);
-               if (host->gpio_write_protect)
-                       gpio_free(host->gpio_write_protect);
-               del_timer_sync(&host->timer);
-               mvsd_power_down(host);
-               iounmap(host->base);
-               release_resource(host->res);
+       mmc_remove_host(mmc);
+       del_timer_sync(&host->timer);
+       mvsd_power_down(host);
+
+       if (!IS_ERR(host->clk))
+               clk_disable_unprepare(host->clk);
+       mmc_free_host(mmc);
 
-               if (!IS_ERR(host->clk)) {
-                       clk_disable_unprepare(host->clk);
-                       clk_put(host->clk);
-               }
-               mmc_free_host(mmc);
-       }
        platform_set_drvdata(pdev, NULL);
        return 0;
 }
index 6a70184..5db9acb 100644 (file)
@@ -189,6 +189,10 @@ config NETPOLL_TRAP
 config NET_POLL_CONTROLLER
        def_bool NETPOLL
 
+config NTB_NETDEV
+       tristate "Virtual Ethernet over NTB"
+       depends on NTB
+
 config RIONET
        tristate "RapidIO Ethernet over messaging driver support"
        depends on RAPIDIO
index 335db78..ef3d090 100644 (file)
@@ -71,3 +71,4 @@ obj-$(CONFIG_USB_IPHETH)        += usb/
 obj-$(CONFIG_USB_CDC_PHONET)   += usb/
 
 obj-$(CONFIG_HYPERV_NET) += hyperv/
+obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
index e49c0ef..a948160 100644 (file)
@@ -61,6 +61,7 @@ config BFIN_RX_DESC_NUM
 
 config BFIN_MAC_USE_HWSTAMP
        bool "Use IEEE 1588 hwstamp"
+       depends on BFIN_MAC && BF518
        select PTP_1588_CLOCK
        default y
        ---help---
index 01588b6..f771ddf 100644 (file)
@@ -80,12 +80,37 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
        }
 
-       memcpy(&bp->bnx2x_txq[old_txdata_index],
-              &bp->bnx2x_txq[new_txdata_index],
+       memcpy(&bp->bnx2x_txq[new_txdata_index],
+              &bp->bnx2x_txq[old_txdata_index],
               sizeof(struct bnx2x_fp_txdata));
        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 }
 
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp:        driver handle
+ * @delta:     number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+       int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+       /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
+        * backward along the array could cause memory to be overriden
+        */
+       for (cos = 1; cos < bp->max_cos; cos++) {
+               for (i = 0; i < old_eth_num - delta; i++) {
+                       struct bnx2x_fastpath *fp = &bp->fp[i];
+                       int new_idx = cos * (old_eth_num - delta) + i;
+
+                       memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+                              sizeof(struct bnx2x_fp_txdata));
+                       fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+               }
+       }
+}
+
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 
 /* free skb in the packet ring at pos idx
@@ -3863,6 +3888,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
 
                WARN_ON(delta < 0);
+               bnx2x_shrink_eth_fp(bp, delta);
                if (CNIC_SUPPORT(bp))
                        /* move non eth FPs next to last eth FP
                         * must be done in that order
index 277f17e..a427b49 100644 (file)
@@ -2777,10 +2777,10 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                } else if ((info->flow_type == UDP_V6_FLOW) &&
                           (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
-                       return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tupple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
+                       return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
                } else {
                        return 0;
                }
index 940ef85..5523da3 100644 (file)
@@ -127,6 +127,17 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
 
 struct workqueue_struct *bnx2x_wq;
 
+struct bnx2x_mac_vals {
+       u32 xmac_addr;
+       u32 xmac_val;
+       u32 emac_addr;
+       u32 emac_val;
+       u32 umac_addr;
+       u32 umac_val;
+       u32 bmac_addr;
+       u32 bmac_val[2];
+};
+
 enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
@@ -9420,12 +9431,19 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
                bnx2x_undi_int_disable_e1h(bp);
 }
 
-static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+                                       struct bnx2x_mac_vals *vals)
 {
        u32 val, base_addr, offset, mask, reset_reg;
        bool mac_stopped = false;
        u8 port = BP_PORT(bp);
 
+       /* reset addresses as they also mark which values were changed */
+       vals->bmac_addr = 0;
+       vals->umac_addr = 0;
+       vals->xmac_addr = 0;
+       vals->emac_addr = 0;
+
        reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
        if (!CHIP_IS_E3(bp)) {
@@ -9447,14 +9465,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
                         */
                        wb_data[0] = REG_RD(bp, base_addr + offset);
                        wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+                       vals->bmac_addr = base_addr + offset;
+                       vals->bmac_val[0] = wb_data[0];
+                       vals->bmac_val[1] = wb_data[1];
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR(bp, base_addr + offset, wb_data[0]);
-                       REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+                       REG_WR(bp, vals->bmac_addr, wb_data[0]);
+                       REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
 
                }
                BNX2X_DEV_INFO("Disable emac Rx\n");
-               REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+               vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+               vals->emac_val = REG_RD(bp, vals->emac_addr);
+               REG_WR(bp, vals->emac_addr, 0);
                mac_stopped = true;
        } else {
                if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9465,14 +9487,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
                               val & ~(1 << 1));
                        REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
                               val | (1 << 1));
-                       REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+                       vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+                       vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+                       REG_WR(bp, vals->xmac_addr, 0);
                        mac_stopped = true;
                }
                mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
                if (mask & reset_reg) {
                        BNX2X_DEV_INFO("Disable umac Rx\n");
                        base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-                       REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+                       vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+                       vals->umac_val = REG_RD(bp, vals->umac_addr);
+                       REG_WR(bp, vals->umac_addr, 0);
                        mac_stopped = true;
                }
        }
@@ -9664,12 +9690,16 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 {
        u32 reset_reg, tmp_reg = 0, rc;
        bool prev_undi = false;
+       struct bnx2x_mac_vals mac_vals;
+
        /* It is possible a previous function received 'common' answer,
         * but hasn't loaded yet, therefore creating a scenario of
         * multiple functions receiving 'common' on the same path.
         */
        BNX2X_DEV_INFO("Common unload Flow\n");
 
+       memset(&mac_vals, 0, sizeof(mac_vals));
+
        if (bnx2x_prev_is_path_marked(bp))
                return bnx2x_prev_mcp_done(bp);
 
@@ -9680,7 +9710,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                u32 timer_count = 1000;
 
                /* Close the MAC Rx to prevent BRB from filling up */
-               bnx2x_prev_unload_close_mac(bp);
+               bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+               /* close LLH filters towards the BRB */
+               bnx2x_set_rx_filter(&bp->link_params, 0);
 
                /* Check if the UNDI driver was previously loaded
                 * UNDI driver initializes CID offset for normal bell to 0x7
@@ -9727,6 +9760,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* No packets are in the pipeline, path is ready for reset */
        bnx2x_reset_common(bp);
 
+       if (mac_vals.xmac_addr)
+               REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+       if (mac_vals.umac_addr)
+               REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+       if (mac_vals.emac_addr)
+               REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+       if (mac_vals.bmac_addr) {
+               REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+               REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+       }
+
        rc = bnx2x_prev_mark_path(bp, prev_undi);
        if (rc) {
                bnx2x_prev_mcp_done(bp);
index 3bc1912..4eba17b 100644 (file)
@@ -190,6 +190,7 @@ struct be_eq_obj {
 
        u8 idx;                 /* array index */
        u16 tx_budget;
+       u16 spurious_intr;
        struct napi_struct napi;
        struct be_adapter *adapter;
 } ____cacheline_aligned_in_smp;
index 9dca22b..5c99570 100644 (file)
@@ -2026,19 +2026,30 @@ static irqreturn_t be_intx(int irq, void *dev)
        struct be_adapter *adapter = eqo->adapter;
        int num_evts = 0;
 
-       /* On Lancer, clear-intr bit of the EQ DB does not work.
-        * INTx is de-asserted only on notifying num evts.
+       /* IRQ is not expected when NAPI is scheduled as the EQ
+        * will not be armed.
+        * But, this can happen on Lancer INTx where it takes
+        * a while to de-assert INTx or in BE2 where occasionaly
+        * an interrupt may be raised even when EQ is unarmed.
+        * If NAPI is already scheduled, then counting & notifying
+        * events will orphan them.
         */
-       if (lancer_chip(adapter))
+       if (napi_schedule_prep(&eqo->napi)) {
                num_evts = events_get(eqo);
+               __napi_schedule(&eqo->napi);
+               if (num_evts)
+                       eqo->spurious_intr = 0;
+       }
+       be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
 
-       /* The EQ-notify may not de-assert INTx rightaway, causing
-        * the ISR to be invoked again. So, return HANDLED even when
-        * num_evts is zero.
+       /* Return IRQ_HANDLED only for the the first spurious intr
+        * after a valid intr to stop the kernel from branding
+        * this irq as a bad one!
         */
-       be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
-       napi_schedule(&eqo->napi);
-       return IRQ_HANDLED;
+       if (num_evts || eqo->spurious_intr++ == 0)
+               return IRQ_HANDLED;
+       else
+               return IRQ_NONE;
 }
 
 static irqreturn_t be_msix(int irq, void *dev)
index f80cd97..3e73742 100644 (file)
@@ -4678,7 +4678,7 @@ static int qlge_probe(struct pci_dev *pdev,
        qdev = netdev_priv(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+               NETIF_F_TSO | NETIF_F_TSO_ECN |
                NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
        ndev->features = ndev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
index 5778a4a..122d60c 100644 (file)
@@ -27,7 +27,7 @@ config XILINX_EMACLITE
 
 config XILINX_AXI_EMAC
        tristate "Xilinx 10/100/1000 AXI Ethernet support"
-       depends on (PPC32 || MICROBLAZE)
+       depends on MICROBLAZE
        select PHYLIB
        ---help---
          This driver supports the 10/100/1000 Ethernet from Xilinx for the
index d9f69b8..6f47100 100644 (file)
@@ -1590,7 +1590,7 @@ static int axienet_of_probe(struct platform_device *op)
        lp->rx_irq = irq_of_parse_and_map(np, 1);
        lp->tx_irq = irq_of_parse_and_map(np, 0);
        of_node_put(np);
-       if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+       if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
                dev_err(&op->dev, "could not determine irqs\n");
                ret = -ENOMEM;
                goto err_iounmap_2;
index f825a62..5ac9caf 100644 (file)
@@ -498,8 +498,7 @@ static int netvsc_remove(struct hv_device *dev)
 
 static const struct hv_vmbus_device_id id_table[] = {
        /* Network guid */
-       { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
-                      0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+       { HV_NIC_GUID, },
        { },
 };
 
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
new file mode 100644 (file)
index 0000000..ed947dd
--- /dev/null
@@ -0,0 +1,408 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Network Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ntb.h>
+
+#define NTB_NETDEV_VER "0.7"
+
+MODULE_DESCRIPTION(KBUILD_MODNAME);
+MODULE_VERSION(NTB_NETDEV_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+struct ntb_netdev {
+       struct list_head list;
+       struct pci_dev *pdev;
+       struct net_device *ndev;
+       struct ntb_transport_qp *qp;
+};
+
+#define        NTB_TX_TIMEOUT_MS       1000
+#define        NTB_RXQ_SIZE            100
+
+static LIST_HEAD(dev_list);
+
+static void ntb_netdev_event_handler(void *data, int status)
+{
+       struct net_device *ndev = data;
+       struct ntb_netdev *dev = netdev_priv(ndev);
+
+       netdev_dbg(ndev, "Event %x, Link %x\n", status,
+                  ntb_transport_link_query(dev->qp));
+
+       /* Currently, only link status event is supported */
+       if (status)
+               netif_carrier_on(ndev);
+       else
+               netif_carrier_off(ndev);
+}
+
+static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+                                 void *data, int len)
+{
+       struct net_device *ndev = qp_data;
+       struct sk_buff *skb;
+       int rc;
+
+       skb = data;
+       if (!skb)
+               return;
+
+       netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+
+       skb_put(skb, len);
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       if (netif_rx(skb) == NET_RX_DROP) {
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_dropped++;
+       } else {
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
+       }
+
+       skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+       if (!skb) {
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_frame_errors++;
+               return;
+       }
+
+       rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+       if (rc) {
+               dev_kfree_skb(skb);
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_fifo_errors++;
+       }
+}
+
+static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+                                 void *data, int len)
+{
+       struct net_device *ndev = qp_data;
+       struct sk_buff *skb;
+
+       skb = data;
+       if (!skb || !ndev)
+               return;
+
+       if (len > 0) {
+               ndev->stats.tx_packets++;
+               ndev->stats.tx_bytes += skb->len;
+       } else {
+               ndev->stats.tx_errors++;
+               ndev->stats.tx_aborted_errors++;
+       }
+
+       dev_kfree_skb(skb);
+}
+
+static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
+                                        struct net_device *ndev)
+{
+       struct ntb_netdev *dev = netdev_priv(ndev);
+       int rc;
+
+       netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
+
+       rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
+       if (rc)
+               goto err;
+
+       return NETDEV_TX_OK;
+
+err:
+       ndev->stats.tx_dropped++;
+       ndev->stats.tx_errors++;
+       return NETDEV_TX_BUSY;
+}
+
+static int ntb_netdev_open(struct net_device *ndev)
+{
+       struct ntb_netdev *dev = netdev_priv(ndev);
+       struct sk_buff *skb;
+       int rc, i, len;
+
+       /* Add some empty rx bufs */
+       for (i = 0; i < NTB_RXQ_SIZE; i++) {
+               skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto err;
+               }
+
+               rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+                                             ndev->mtu + ETH_HLEN);
+               if (rc == -EINVAL)
+                       goto err;
+       }
+
+       netif_carrier_off(ndev);
+       ntb_transport_link_up(dev->qp);
+
+       return 0;
+
+err:
+       while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+               dev_kfree_skb(skb);
+       return rc;
+}
+
+static int ntb_netdev_close(struct net_device *ndev)
+{
+       struct ntb_netdev *dev = netdev_priv(ndev);
+       struct sk_buff *skb;
+       int len;
+
+       ntb_transport_link_down(dev->qp);
+
+       while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+               dev_kfree_skb(skb);
+
+       return 0;
+}
+
+static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct ntb_netdev *dev = netdev_priv(ndev);
+       struct sk_buff *skb;
+       int len, rc;
+
+       if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
+               return -EINVAL;
+
+       if (!netif_running(ndev)) {
+               ndev->mtu = new_mtu;
+               return 0;
+       }
+
+       /* Bring down the link and dispose of posted rx entries */
+       ntb_transport_link_down(dev->qp);
+
+       if (ndev->mtu < new_mtu) {
+               int i;
+
+               for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
+                       dev_kfree_skb(skb);
+
+               for (; i; i--) {
+                       skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
+                       if (!skb) {
+                               rc = -ENOMEM;
+                               goto err;
+                       }
+
+                       rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+                                                     new_mtu + ETH_HLEN);
+                       if (rc) {
+                               dev_kfree_skb(skb);
+                               goto err;
+                       }
+               }
+       }
+
+       ndev->mtu = new_mtu;
+
+       ntb_transport_link_up(dev->qp);
+
+       return 0;
+
+err:
+       ntb_transport_link_down(dev->qp);
+
+       while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+               dev_kfree_skb(skb);
+
+       netdev_err(ndev, "Error changing MTU, device inoperable\n");
+       return rc;
+}
+
+static const struct net_device_ops ntb_netdev_ops = {
+       .ndo_open = ntb_netdev_open,
+       .ndo_stop = ntb_netdev_close,
+       .ndo_start_xmit = ntb_netdev_start_xmit,
+       .ndo_change_mtu = ntb_netdev_change_mtu,
+       .ndo_set_mac_address = eth_mac_addr,
+};
+
+static void ntb_get_drvinfo(struct net_device *ndev,
+                           struct ethtool_drvinfo *info)
+{
+       struct ntb_netdev *dev = netdev_priv(ndev);
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+}
+
+static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       cmd->supported = SUPPORTED_Backplane;
+       cmd->advertising = ADVERTISED_Backplane;
+       cmd->speed = SPEED_UNKNOWN;
+       ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+       cmd->duplex = DUPLEX_FULL;
+       cmd->port = PORT_OTHER;
+       cmd->phy_address = 0;
+       cmd->transceiver = XCVR_DUMMY1;
+       cmd->autoneg = AUTONEG_ENABLE;
+       cmd->maxtxpkt = 0;
+       cmd->maxrxpkt = 0;
+
+       return 0;
+}
+
+static const struct ethtool_ops ntb_ethtool_ops = {
+       .get_drvinfo = ntb_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_settings = ntb_get_settings,
+};
+
+static const struct ntb_queue_handlers ntb_netdev_handlers = {
+       .tx_handler = ntb_netdev_tx_handler,
+       .rx_handler = ntb_netdev_rx_handler,
+       .event_handler = ntb_netdev_event_handler,
+};
+
+static int ntb_netdev_probe(struct pci_dev *pdev)
+{
+       struct net_device *ndev;
+       struct ntb_netdev *dev;
+       int rc;
+
+       ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+       if (!ndev)
+               return -ENOMEM;
+
+       dev = netdev_priv(ndev);
+       dev->ndev = ndev;
+       dev->pdev = pdev;
+       BUG_ON(!dev->pdev);
+       ndev->features = NETIF_F_HIGHDMA;
+
+       ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+       ndev->hw_features = ndev->features;
+       ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
+
+       random_ether_addr(ndev->perm_addr);
+       memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+
+       ndev->netdev_ops = &ntb_netdev_ops;
+       SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+
+       dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+       if (!dev->qp) {
+               rc = -EIO;
+               goto err;
+       }
+
+       ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
+
+       rc = register_netdev(ndev);
+       if (rc)
+               goto err1;
+
+       list_add(&dev->list, &dev_list);
+       dev_info(&pdev->dev, "%s created\n", ndev->name);
+       return 0;
+
+err1:
+       ntb_transport_free_queue(dev->qp);
+err:
+       free_netdev(ndev);
+       return rc;
+}
+
+static void ntb_netdev_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev;
+       struct ntb_netdev *dev;
+
+       list_for_each_entry(dev, &dev_list, list) {
+               if (dev->pdev == pdev)
+                       break;
+       }
+       if (dev == NULL)
+               return;
+
+       ndev = dev->ndev;
+
+       unregister_netdev(ndev);
+       ntb_transport_free_queue(dev->qp);
+       free_netdev(ndev);
+}
+
+static struct ntb_client ntb_netdev_client = {
+       .driver.name = KBUILD_MODNAME,
+       .driver.owner = THIS_MODULE,
+       .probe = ntb_netdev_probe,
+       .remove = ntb_netdev_remove,
+};
+
+static int __init ntb_netdev_init_module(void)
+{
+       int rc;
+
+       rc = ntb_register_client_dev(KBUILD_MODNAME);
+       if (rc)
+               return rc;
+       return ntb_register_client(&ntb_netdev_client);
+}
+module_init(ntb_netdev_init_module);
+
+static void __exit ntb_netdev_exit_module(void)
+{
+       ntb_unregister_client(&ntb_netdev_client);
+       ntb_unregister_client_dev(KBUILD_MODNAME);
+}
+module_exit(ntb_netdev_exit_module);
index fbd106e..af372d0 100644 (file)
@@ -404,8 +404,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        struct tun_struct *tun;
        struct net_device *dev;
 
-       tun = rcu_dereference_protected(tfile->tun,
-                                       lockdep_rtnl_is_held());
+       tun = rtnl_dereference(tfile->tun);
+
        if (tun) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
@@ -414,8 +414,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
                rcu_assign_pointer(tfile->tun, NULL);
-               ntfile = rcu_dereference_protected(tun->tfiles[index],
-                                                  lockdep_rtnl_is_held());
+               ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;
 
                --tun->numqueues;
@@ -429,8 +428,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                /* Drop read queue */
                skb_queue_purge(&tfile->sk.sk_receive_queue);
                tun_set_real_num_queues(tun);
-       } else if (tfile->detached && clean)
+       } else if (tfile->detached && clean) {
                tun = tun_enable_queue(tfile);
+               sock_put(&tfile->sk);
+       }
 
        if (clean) {
                if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
@@ -458,8 +459,7 @@ static void tun_detach_all(struct net_device *dev)
        int i, n = tun->numqueues;
 
        for (i = 0; i < n; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
                wake_up_all(&tfile->wq.wait);
                rcu_assign_pointer(tfile->tun, NULL);
@@ -469,8 +469,7 @@ static void tun_detach_all(struct net_device *dev)
 
        synchronize_net();
        for (i = 0; i < n; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                /* Drop read queue */
                skb_queue_purge(&tfile->sk.sk_receive_queue);
                sock_put(&tfile->sk);
@@ -481,6 +480,9 @@ static void tun_detach_all(struct net_device *dev)
                sock_put(&tfile->sk);
        }
        BUG_ON(tun->numdisabled != 0);
+
+       if (tun->flags & TUN_PERSIST)
+               module_put(THIS_MODULE);
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -489,7 +491,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
        int err;
 
        err = -EINVAL;
-       if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+       if (rtnl_dereference(tfile->tun))
                goto out;
 
        err = -EBUSY;
@@ -1544,6 +1546,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
        struct net_device *dev;
        int err;
 
+       if (tfile->detached)
+               return -EINVAL;
+
        dev = __dev_get_by_name(net, ifr->ifr_name);
        if (dev) {
                if (ifr->ifr_flags & IFF_TUN_EXCL)
@@ -1738,8 +1743,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
        struct tun_file *tfile;
 
        for (i = 0; i < n; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                sk_detach_filter(tfile->socket.sk);
        }
 
@@ -1752,8 +1756,7 @@ static int tun_attach_filter(struct tun_struct *tun)
        struct tun_file *tfile;
 
        for (i = 0; i < tun->numqueues; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                if (ret) {
                        tun_detach_filter(tun, i);
@@ -1771,8 +1774,7 @@ static void tun_set_sndbuf(struct tun_struct *tun)
        int i;
 
        for (i = 0; i < tun->numqueues; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                               lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                tfile->socket.sk->sk_sndbuf = tun->sndbuf;
        }
 }
@@ -1789,13 +1791,10 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                tun = tfile->detached;
                if (!tun)
                        ret = -EINVAL;
-               else if (tun_not_capable(tun))
-                       ret = -EPERM;
                else
                        ret = tun_attach(tun, file);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
-               tun = rcu_dereference_protected(tfile->tun,
-                                               lockdep_rtnl_is_held());
+               tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & TUN_TAP_MQ))
                        ret = -EINVAL;
                else
@@ -1880,10 +1879,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                /* Disable/Enable persist mode. Keep an extra reference to the
                 * module to prevent the module being unprobed.
                 */
-               if (arg) {
+               if (arg && !(tun->flags & TUN_PERSIST)) {
                        tun->flags |= TUN_PERSIST;
                        __module_get(THIS_MODULE);
-               } else {
+               }
+               if (!arg && (tun->flags & TUN_PERSIST)) {
                        tun->flags &= ~TUN_PERSIST;
                        module_put(THIS_MODULE);
                }
index 1a67a4f..2c02b4e 100644 (file)
@@ -30,5 +30,6 @@ source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
 source "drivers/net/wireless/ath/ath6kl/Kconfig"
 source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
 
 endif
index 1e18621..97b964d 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATH9K_HW)          += ath9k/
 obj-$(CONFIG_CARL9170)         += carl9170/
 obj-$(CONFIG_ATH6KL)           += ath6kl/
 obj-$(CONFIG_AR5523)           += ar5523/
+obj-$(CONFIG_WIL6210)          += wil6210/
 
 obj-$(CONFIG_ATH_COMMON)       += ath.o
 
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644 (file)
index 0000000..bac3d98
--- /dev/null
@@ -0,0 +1,29 @@
+config WIL6210
+       tristate "Wilocity 60g WiFi card wil6210 support"
+       depends on CFG80211
+       depends on PCI
+       default n
+       ---help---
+         This module adds support for wireless adapter based on
+         wil6210 chip by Wilocity. It supports operation on the
+         60 GHz band, covered by the IEEE802.11ad standard.
+
+         http://wireless.kernel.org/en/users/Drivers/wil6210
+
+         If you choose to build it as a module, it will be called
+         wil6210
+
+config WIL6210_ISR_COR
+       bool "Use Clear-On-Read mode for ISR registers for wil6210"
+       depends on WIL6210
+       default y
+       ---help---
+         ISR registers on wil6210 chip may operate in either
+         COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+         For production code, use COR (say y); is default since
+         it saves extra target transaction;
+         For ISR debug, use W1C (say n); is allows to monitor ISR
+         registers with debugfs. If COR were used, ISR would
+         self-clear when accessed for debug purposes, it makes
+         such monitoring impossible.
+         Say y unless you debug interrupts
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644 (file)
index 0000000..9396dc9
--- /dev/null
@@ -0,0 +1,13 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-objs := main.o
+wil6210-objs += netdev.o
+wil6210-objs += cfg80211.o
+wil6210-objs += pcie_bus.o
+wil6210-objs += debugfs.o
+wil6210-objs += wmi.o
+wil6210-objs += interrupt.o
+wil6210-objs += txrx.o
+
+subdir-ccflags-y += -Werror
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644 (file)
index 0000000..116f4e8
--- /dev/null
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) {                            \
+       .band                   = IEEE80211_BAND_60GHZ,         \
+       .center_freq            = 56160 + (2160 * (_channel)),  \
+       .hw_value               = (_channel),                   \
+       .flags                  = (_flags),                     \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 40,                           \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+       CHAN60G(1, 0),
+       CHAN60G(2, 0),
+       CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+       .channels = wil_60ghz_channels,
+       .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+       .ht_cap = {
+               .ht_supported = true,
+               .cap = 0, /* TODO */
+               .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+               .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+               .mcs = {
+                               /* MCS 1..12 - SC PHY */
+                       .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+                       .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+               },
+       },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+       [NL80211_IFTYPE_STATION] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_AP] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_P2P_CLIENT] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_P2P_GO] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+};
+
+static const u32 wil_cipher_suites[] = {
+       WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+       static const struct {
+               enum nl80211_iftype nl;
+               enum wmi_network_type wmi;
+       } __nl2wmi[] = {
+               {NL80211_IFTYPE_ADHOC,          WMI_NETTYPE_ADHOC},
+               {NL80211_IFTYPE_STATION,        WMI_NETTYPE_INFRA},
+               {NL80211_IFTYPE_AP,             WMI_NETTYPE_AP},
+               {NL80211_IFTYPE_P2P_CLIENT,     WMI_NETTYPE_P2P},
+               {NL80211_IFTYPE_P2P_GO,         WMI_NETTYPE_P2P},
+               {NL80211_IFTYPE_MONITOR,        WMI_NETTYPE_ADHOC}, /* FIXME */
+       };
+       uint i;
+
+       for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+               if (__nl2wmi[i].nl == type)
+                       return __nl2wmi[i].wmi;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+                                   struct net_device *ndev,
+                                   u8 *mac, struct station_info *sinfo)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+       struct wmi_notify_req_cmd cmd = {
+               .cid = 0,
+               .interval_usec = 0,
+       };
+
+       if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
+               return -ENOENT;
+
+       /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
+       rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+                     WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+       if (rc)
+               return rc;
+
+       sinfo->generation = wil->sinfo_gen;
+
+       sinfo->filled |= STATION_INFO_TX_BITRATE;
+       sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+       sinfo->txrate.mcs = wil->stats.bf_mcs;
+       sinfo->filled |= STATION_INFO_RX_BITRATE;
+       sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+       sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+
+       if (test_bit(wil_status_fwconnected, &wil->status)) {
+               sinfo->filled |= STATION_INFO_SIGNAL;
+               sinfo->signal = 12; /* TODO: provide real value */
+       }
+
+       return 0;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+                                    struct net_device *ndev,
+                                    enum nl80211_iftype type, u32 *flags,
+                                    struct vif_params *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = wil->wdev;
+
+       switch (type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               if (flags)
+                       wil->monitor_flags = *flags;
+               else
+                       wil->monitor_flags = 0;
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       wdev->iftype = type;
+
+       return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+                            struct cfg80211_scan_request *request)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = wil->wdev;
+       struct {
+               struct wmi_start_scan_cmd cmd;
+               u16 chnl[4];
+       } __packed cmd;
+       uint i, n;
+
+       if (wil->scan_request) {
+               wil_err(wil, "Already scanning\n");
+               return -EAGAIN;
+       }
+
+       /* check we are client side */
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               break;
+       default:
+               return -EOPNOTSUPP;
+
+       }
+
+       /* FW don't support scan after connection attempt */
+       if (test_bit(wil_status_dontscan, &wil->status)) {
+               wil_err(wil, "Scan after connect attempt not supported\n");
+               return -EBUSY;
+       }
+
+       wil->scan_request = request;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.cmd.num_channels = 0;
+       n = min(request->n_channels, 4U);
+       for (i = 0; i < n; i++) {
+               int ch = request->channels[i]->hw_value;
+               if (ch == 0) {
+                       wil_err(wil,
+                               "Scan requested for unknown frequency %dMhz\n",
+                               request->channels[i]->center_freq);
+                       continue;
+               }
+               /* 0-based channel indexes */
+               cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+               wil_dbg(wil, "Scan for ch %d  : %d MHz\n", ch,
+                       request->channels[i]->center_freq);
+       }
+
+       return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+                       cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+}
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               struct cfg80211_connect_params *sme)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct cfg80211_bss *bss;
+       struct wmi_connect_cmd conn;
+       const u8 *ssid_eid;
+       const u8 *rsn_eid;
+       int ch;
+       int rc = 0;
+
+       bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+                              sme->ssid, sme->ssid_len,
+                              WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+       if (!bss) {
+               wil_err(wil, "Unable to find BSS\n");
+               return -ENOENT;
+       }
+
+       ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+       if (!ssid_eid) {
+               wil_err(wil, "No SSID\n");
+               rc = -ENOENT;
+               goto out;
+       }
+
+       rsn_eid = sme->ie ?
+                       cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+                       NULL;
+       if (rsn_eid) {
+               if (sme->ie_len > WMI_MAX_IE_LEN) {
+                       rc = -ERANGE;
+                       wil_err(wil, "IE too large (%td bytes)\n",
+                               sme->ie_len);
+                       goto out;
+               }
+               /*
+                * For secure assoc, send:
+                * (1) WMI_DELETE_CIPHER_KEY_CMD
+                * (2) WMI_SET_APPIE_CMD
+                */
+               rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+               if (rc) {
+                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+                       goto out;
+               }
+               /* WMI_SET_APPIE_CMD */
+               rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+               if (rc) {
+                       wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+                       goto out;
+               }
+       }
+
+       /* WMI_CONNECT_CMD */
+       memset(&conn, 0, sizeof(conn));
+       switch (bss->capability & 0x03) {
+       case WLAN_CAPABILITY_DMG_TYPE_AP:
+               conn.network_type = WMI_NETTYPE_INFRA;
+               break;
+       case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+               conn.network_type = WMI_NETTYPE_P2P;
+               break;
+       default:
+               wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+                       bss->capability);
+               goto out;
+       }
+       if (rsn_eid) {
+               conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+               conn.auth_mode = WMI_AUTH_WPA2_PSK;
+               conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+               conn.pairwise_crypto_len = 16;
+       } else {
+               conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+               conn.auth_mode = WMI_AUTH_NONE;
+       }
+
+       conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+       memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+       ch = bss->channel->hw_value;
+       if (ch == 0) {
+               wil_err(wil, "BSS at unknown frequency %dMhz\n",
+                       bss->channel->center_freq);
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+       conn.channel = ch - 1;
+
+       memcpy(conn.bssid, bss->bssid, 6);
+       memcpy(conn.dst_mac, bss->bssid, 6);
+       /*
+        * FW don't support scan after connection attempt
+        */
+       set_bit(wil_status_dontscan, &wil->status);
+
+       rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+       if (rc == 0) {
+               /* Connect can take lots of time */
+               mod_timer(&wil->connect_timer,
+                         jiffies + msecs_to_jiffies(2000));
+       }
+
+ out:
+       cfg80211_put_bss(bss);
+
+       return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+                                  struct net_device *ndev,
+                                  u16 reason_code)
+{
+       int rc;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+       return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+                                   struct cfg80211_chan_def *chandef)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = wil->wdev;
+
+       wdev->preset_chandef = *chandef;
+
+       return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               u8 key_index, bool pairwise,
+                               const u8 *mac_addr,
+                               struct key_params *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       /* group key is not used */
+       if (!pairwise)
+               return 0;
+
+       return wmi_add_cipher_key(wil, key_index, mac_addr,
+                                 params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               u8 key_index, bool pairwise,
+                               const u8 *mac_addr)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       /* group key is not used */
+       if (!pairwise)
+               return 0;
+
+       return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Need to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+                                       struct net_device *ndev,
+                                       u8 key_index, bool unicast,
+                                       bool multicast)
+{
+       return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+                                struct net_device *ndev,
+                                struct cfg80211_ap_settings *info)
+{
+       int rc = 0;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       struct ieee80211_channel *channel = info->chandef.chan;
+       struct cfg80211_beacon_data *bcon = &info->beacon;
+       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       if (!channel) {
+               wil_err(wil, "AP: No channel???\n");
+               return -EINVAL;
+       }
+
+       wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+               channel->center_freq, info->privacy ? "secure" : "open");
+       print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+                            info->ssid, info->ssid_len);
+
+       rc = wil_reset(wil);
+       if (rc)
+               return rc;
+
+       rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+       if (rc)
+               return rc;
+
+       rc = wmi_set_channel(wil, channel->hw_value);
+       if (rc)
+               return rc;
+
+       /* MAC address - pre-requisite for other commands */
+       wmi_set_mac_address(wil, ndev->dev_addr);
+
+       /* IE's */
+       /* bcon 'head IE's are not relevant for 60g band */
+       wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+                  bcon->beacon_ies);
+       wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+                  bcon->proberesp_ies);
+       wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+                  bcon->assocresp_ies);
+
+       wil->secure_pcp = info->privacy;
+
+       rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+       if (rc)
+               return rc;
+
+       /* Rx VRING. After MAC and beacon */
+       rc = wil_rx_init(wil);
+
+       netif_carrier_on(ndev);
+
+       return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+                               struct net_device *ndev)
+{
+       int rc = 0;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       /* To stop beaconing, set BI to 0 */
+       rc = wmi_set_bcon(wil, 0, wmi_nettype);
+
+       return rc;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+       .scan = wil_cfg80211_scan,
+       .connect = wil_cfg80211_connect,
+       .disconnect = wil_cfg80211_disconnect,
+       .change_virtual_intf = wil_cfg80211_change_iface,
+       .get_station = wil_cfg80211_get_station,
+       .set_monitor_channel = wil_cfg80211_set_channel,
+       .add_key = wil_cfg80211_add_key,
+       .del_key = wil_cfg80211_del_key,
+       .set_default_key = wil_cfg80211_set_default_key,
+       /* AP mode */
+       .start_ap = wil_cfg80211_start_ap,
+       .stop_ap = wil_cfg80211_stop_ap,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+       /* TODO: set real value */
+       wiphy->max_scan_ssids = 10;
+       wiphy->max_num_pmkids = 0 /* TODO: */;
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_MONITOR);
+       /* TODO: enable P2P when integrated with supplicant:
+        * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+        */
+       wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+                       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+       dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+                __func__, wiphy->flags);
+       wiphy->probe_resp_offload =
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+       wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+       /* TODO: figure this out */
+       wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+       wiphy->cipher_suites = wil_cipher_suites;
+       wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+       wiphy->mgmt_stypes = wil_mgmt_stypes;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+       int rc = 0;
+       struct wireless_dev *wdev;
+
+       wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+       if (!wdev)
+               return ERR_PTR(-ENOMEM);
+
+       wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+                               sizeof(struct wil6210_priv));
+       if (!wdev->wiphy) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       set_wiphy_dev(wdev->wiphy, dev);
+       wil_wiphy_init(wdev->wiphy);
+
+       rc = wiphy_register(wdev->wiphy);
+       if (rc < 0)
+               goto out_failed_reg;
+
+       return wdev;
+
+out_failed_reg:
+       wiphy_free(wdev->wiphy);
+out:
+       kfree(wdev);
+
+       return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+
+       if (!wdev)
+               return;
+
+       wiphy_unregister(wdev->wiphy);
+       wiphy_free(wdev->wiphy);
+       kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
new file mode 100644 (file)
index 0000000..6a315ba
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef WIL_DBG_HEXDUMP_H_
+#define WIL_DBG_HEXDUMP_H_
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+                            groupsize, buf, len, ascii)        \
+do {                                                           \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor,               \
+               __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))  \
+               print_hex_dump(KERN_DEBUG, prefix_str,          \
+                              prefix_type, rowsize, groupsize, \
+                              buf, len, ascii);                \
+} while (0)
+
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize,     \
+                                groupsize, buf, len, ascii)            \
+       wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize,          \
+                            groupsize, buf, len, ascii)
+
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len)        \
+       wil_dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize,     \
+                                groupsize, buf, len, ascii)            \
+       print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize,    \
+                      groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644 (file)
index 0000000..65fc968
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+
+/* Nasty hack. Better have per device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+                           const char *name, struct vring *vring)
+{
+       void __iomem *x = wmi_addr(wil, vring->hwtail);
+
+       seq_printf(s, "VRING %s = {\n", name);
+       seq_printf(s, "  pa     = 0x%016llx\n", (unsigned long long)vring->pa);
+       seq_printf(s, "  va     = 0x%p\n", vring->va);
+       seq_printf(s, "  size   = %d\n", vring->size);
+       seq_printf(s, "  swtail = %d\n", vring->swtail);
+       seq_printf(s, "  swhead = %d\n", vring->swhead);
+       seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
+       if (x)
+               seq_printf(s, "0x%08x\n", ioread32(x));
+       else
+               seq_printf(s, "???\n");
+
+       if (vring->va && (vring->size < 1025)) {
+               uint i;
+               for (i = 0; i < vring->size; i++) {
+                       volatile struct vring_tx_desc *d = &vring->va[i].tx;
+                       if ((i % 64) == 0 && (i != 0))
+                               seq_printf(s, "\n");
+                       seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
+                                       "S" : (vring->ctx[i] ? "H" : "h"));
+               }
+               seq_printf(s, "\n");
+       }
+       seq_printf(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+       uint i;
+       struct wil6210_priv *wil = s->private;
+
+       wil_print_vring(s, wil, "rx", &wil->vring_rx);
+
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+               struct vring *vring = &(wil->vring_tx[i]);
+               if (vring->va) {
+                       char name[10];
+                       snprintf(name, sizeof(name), "tx_%2d", i);
+                       wil_print_vring(s, wil, name, vring);
+               }
+       }
+
+       return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+       .open           = wil_vring_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+                          void __iomem *off)
+{
+       struct wil6210_priv *wil = s->private;
+       struct wil6210_mbox_ring r;
+       int rsize;
+       uint i;
+
+       wil_memcpy_fromio_32(&r, off, sizeof(r));
+       wil_mbox_ring_le2cpus(&r);
+       /*
+        * we just read memory block from NIC. This memory may be
+        * garbage. Check validity before using it.
+        */
+       rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+       seq_printf(s, "ring %s = {\n", prefix);
+       seq_printf(s, "  base = 0x%08x\n", r.base);
+       seq_printf(s, "  size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+       seq_printf(s, "  tail = 0x%08x\n", r.tail);
+       seq_printf(s, "  head = 0x%08x\n", r.head);
+       seq_printf(s, "  entry size = %d\n", r.entry_size);
+
+       if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+               seq_printf(s, "  ??? size is not multiple of %zd, garbage?\n",
+                          sizeof(struct wil6210_mbox_ring_desc));
+               goto out;
+       }
+
+       if (!wmi_addr(wil, r.base) ||
+           !wmi_addr(wil, r.tail) ||
+           !wmi_addr(wil, r.head)) {
+               seq_printf(s, "  ??? pointers are garbage?\n");
+               goto out;
+       }
+
+       for (i = 0; i < rsize; i++) {
+               struct wil6210_mbox_ring_desc d;
+               struct wil6210_mbox_hdr hdr;
+               size_t delta = i * sizeof(d);
+               void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+               wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+               seq_printf(s, "  [%2x] %s %s%s 0x%08x", i,
+                          d.sync ? "F" : "E",
+                          (r.tail - r.base == delta) ? "t" : " ",
+                          (r.head - r.base == delta) ? "h" : " ",
+                          le32_to_cpu(d.addr));
+               if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+                       u16 len = le16_to_cpu(hdr.len);
+                       seq_printf(s, " -> %04x %04x %04x %02x\n",
+                                  le16_to_cpu(hdr.seq), len,
+                                  le16_to_cpu(hdr.type), hdr.flags);
+                       if (len <= MAX_MBOXITEM_SIZE) {
+                               int n = 0;
+                               unsigned char printbuf[16 * 3 + 2];
+                               unsigned char databuf[MAX_MBOXITEM_SIZE];
+                               void __iomem *src = wmi_buffer(wil, d.addr) +
+                                       sizeof(struct wil6210_mbox_hdr);
+                               /*
+                                * No need to check @src for validity -
+                                * we already validated @d.addr while
+                                * reading header
+                                */
+                               wil_memcpy_fromio_32(databuf, src, len);
+                               while (n < len) {
+                                       int l = min(len - n, 16);
+                                       hex_dump_to_buffer(databuf + n, l,
+                                                          16, 1, printbuf,
+                                                          sizeof(printbuf),
+                                                          false);
+                                       seq_printf(s, "      : %s\n", printbuf);
+                                       n += l;
+                               }
+                       }
+               } else {
+                       seq_printf(s, "\n");
+               }
+       }
+ out:
+       seq_printf(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+
+       wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+                      offsetof(struct wil6210_mbox_ctl, tx));
+       wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+                      offsetof(struct wil6210_mbox_ctl, rx));
+
+       return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+       .open           = wil_mbox_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+       iowrite32(val, (void __iomem *)data);
+       wmb(); /* make sure write propagated to HW */
+
+       return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+       *val = ioread32((void __iomem *)data);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+                       wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+                                                  mode_t mode,
+                                                  struct dentry *parent,
+                                                  void __iomem *value)
+{
+       return debugfs_create_file(name, mode, parent, (void * __force)value,
+                                  &fops_iomem_x32);
+}
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+                                     const char *name,
+                                     struct dentry *parent, u32 off)
+{
+       struct dentry *d = debugfs_create_dir(name, parent);
+
+       if (IS_ERR_OR_NULL(d))
+               return -ENODEV;
+
+       wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off);
+       wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off + 4);
+       wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off + 8);
+       wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d,
+                                    wil->csr + off + 12);
+       wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off + 16);
+       wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d,
+                                    wil->csr + off + 20);
+       wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d,
+                                    wil->csr + off + 24);
+
+       return 0;
+}
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+                                            struct dentry *parent)
+{
+       struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+       if (IS_ERR_OR_NULL(d))
+               return -ENODEV;
+
+       wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+       wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+       wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW));
+
+       return 0;
+}
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+                                         struct dentry *parent)
+{
+       struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+
+       if (IS_ERR_OR_NULL(d))
+               return -ENODEV;
+
+       wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+       wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_ITR_CNT_DATA));
+       wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+
+       return 0;
+}
+
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+       if (a)
+               seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+       else
+               seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+       return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+       .open           = wil_memread_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+static int wil_default_open(struct inode *inode, struct file *file)
+{
+       if (inode->i_private)
+               file->private_data = inode->i_private;
+
+       return 0;
+}
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       enum { max_count = 4096 };
+       struct debugfs_blob_wrapper *blob = file->private_data;
+       loff_t pos = *ppos;
+       size_t available = blob->size;
+       void *buf;
+       size_t ret;
+
+       if (pos < 0)
+               return -EINVAL;
+
+       if (pos >= available || !count)
+               return 0;
+
+       if (count > available - pos)
+               count = available - pos;
+       if (count > max_count)
+               count = max_count;
+
+       buf = kmalloc(count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+                            pos, count);
+
+       ret = copy_to_user(user_buf, buf, count);
+       kfree(buf);
+       if (ret == count)
+               return -EFAULT;
+
+       count -= ret;
+       *ppos = pos + count;
+
+       return count;
+}
+
+static const struct file_operations fops_ioblob = {
+       .read =         wil_read_file_ioblob,
+       .open =         wil_default_open,
+       .llseek =       default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+                                        mode_t mode,
+                                        struct dentry *parent,
+                                        struct debugfs_blob_wrapper *blob)
+{
+       return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+                                   size_t len, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       /**
+        * BUG:
+        * this code does NOT sync device state with the rest of system
+        * use with care, debug only!!!
+        */
+       rtnl_lock();
+       dev_close(ndev);
+       ndev->flags &= ~IFF_UP;
+       rtnl_unlock();
+       wil_reset(wil);
+
+       return len;
+}
+
+static const struct file_operations fops_reset = {
+       .write = wil_write_file_reset,
+       .open  = wil_default_open,
+};
+/*---------Tx descriptor------------*/
+
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       struct vring *vring = &(wil->vring_tx[0]);
+
+       if (!vring->va) {
+               seq_printf(s, "No Tx VRING\n");
+               return 0;
+       }
+
+       if (dbg_txdesc_index < vring->size) {
+               volatile struct vring_tx_desc *d =
+                               &(vring->va[dbg_txdesc_index].tx);
+               volatile u32 *u = (volatile u32 *)d;
+               struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+
+               seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+               seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          u[0], u[1], u[2], u[3]);
+               seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          u[4], u[5], u[6], u[7]);
+               seq_printf(s, "  SKB = %p\n", skb);
+
+               if (skb) {
+                       unsigned char printbuf[16 * 3 + 2];
+                       int i = 0;
+                       int len = skb_headlen(skb);
+                       void *p = skb->data;
+
+                       seq_printf(s, "    len = %d\n", len);
+
+                       while (i < len) {
+                               int l = min(len - i, 16);
+                               hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+                                                  sizeof(printbuf), false);
+                               seq_printf(s, "      : %s\n", printbuf);
+                               i += l;
+                       }
+               }
+               seq_printf(s, "}\n");
+       } else {
+               seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
+                          dbg_txdesc_index, vring->size);
+       }
+
+       return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+       .open           = wil_txdesc_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       seq_printf(s,
+                  "TSF : 0x%016llx\n"
+                  "TxMCS : %d\n"
+                  "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n",
+                  wil->stats.tsf, wil->stats.bf_mcs,
+                  wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+                  wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+       return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+       .open           = wil_bf_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       if (*ppos != 0) {
+               wil_err(wil, "Unable to set SSID substring from [%d]\n",
+                       (int)*ppos);
+               return -EINVAL;
+       }
+
+       if (count > sizeof(wdev->ssid)) {
+               wil_err(wil, "SSID too long, len = %d\n", (int)count);
+               return -EINVAL;
+       }
+       if (netif_running(ndev)) {
+               wil_err(wil, "Unable to change SSID on running interface\n");
+               return -EINVAL;
+       }
+
+       wdev->ssid_len = count;
+       return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+                                     buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+       .read = wil_read_file_ssid,
+       .write = wil_write_file_ssid,
+       .open  = wil_default_open,
+};
+
+/*----------------*/
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+       struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+                       wil_to_wiphy(wil)->debugfsdir);
+
+       if (IS_ERR_OR_NULL(dbg))
+               return -ENODEV;
+
+       debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
+       debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
+       debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
+       debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+                          &dbg_txdesc_index);
+       debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
+       debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
+       debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
+                          &wil->secure_pcp);
+
+       wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
+                                  HOSTADDR(RGF_USER_USER_ICR));
+       wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg,
+                                  HOSTADDR(RGF_DMA_EP_TX_ICR));
+       wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg,
+                                  HOSTADDR(RGF_DMA_EP_RX_ICR));
+       wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg,
+                                  HOSTADDR(RGF_DMA_EP_MISC_ICR));
+       wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+       wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+       debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
+       debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
+
+       debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+
+       wil->rgf_blob.data = (void * __force)wil->csr + 0;
+       wil->rgf_blob.size = 0xa000;
+       wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
+
+       wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
+       wil->fw_code_blob.size = 0x40000;
+       wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
+                                 &wil->fw_code_blob);
+
+       wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
+       wil->fw_data_blob.size = 0x8000;
+       wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
+                                 &wil->fw_data_blob);
+
+       wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
+       wil->fw_peri_blob.size = 0x18000;
+       wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
+                                 &wil->fw_peri_blob);
+
+       wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
+       wil->uc_code_blob.size = 0x10000;
+       wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
+                                 &wil->uc_code_blob);
+
+       wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
+       wil->uc_data_blob.size = 0x4000;
+       wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
+                                 &wil->uc_data_blob);
+
+       return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+       debugfs_remove_recursive(wil->debug);
+       wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644 (file)
index 0000000..38049da
--- /dev/null
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+/**
+ * Theory of operation:
+ *
+ * There is ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE
+ * Its bits represents OR'ed bits from 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode
+ *
+ * When handling interrupt, one have to mask/unmask interrupts for the
+ * real ISR registers, or hardware may malfunction.
+ *
+ */
+
+#define WIL6210_IRQ_DISABLE    (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX         BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_TX         (BIT_DMA_EP_TX_ICR_TX_DONE | \
+                               BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC       (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+                                       BIT_DMA_PSEUDO_CAUSE_TX | \
+                                       BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE      (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE      (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+       iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+       u32 x = ioread32(addr);
+
+       wil_icr_clear(x, addr);
+
+       return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                 offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                 offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                 offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+       clear_bit(wil_status_irqen, &wil->status);
+}
+
+static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IMC_TX, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                 offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IMC_RX, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                 offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IMC_MISC, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                 offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       set_bit(wil_status_irqen, &wil->status);
+
+       iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil6210_disable_irq(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       wil6210_mask_irq_tx(wil);
+       wil6210_mask_irq_rx(wil);
+       wil6210_mask_irq_misc(wil);
+       wil6210_mask_irq_pseudo(wil);
+}
+
+void wil6210_enable_irq(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                 offsetof(struct RGF_ICR, ICC));
+       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                 offsetof(struct RGF_ICR, ICC));
+       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                 offsetof(struct RGF_ICR, ICC));
+
+       wil6210_unmask_irq_pseudo(wil);
+       wil6210_unmask_irq_tx(wil);
+       wil6210_unmask_irq_rx(wil);
+       wil6210_unmask_irq_misc(wil);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil_ioread32_and_clear(wil->csr +
+                                        HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                                        offsetof(struct RGF_ICR, ICR));
+
+       wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr);
+
+       if (!isr) {
+               wil_err(wil, "spurious IRQ: RX\n");
+               return IRQ_NONE;
+       }
+
+       wil6210_mask_irq_rx(wil);
+
+       if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+               wil_dbg_IRQ(wil, "RX done\n");
+               isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+               wil_rx_handle(wil);
+       }
+
+       if (isr)
+               wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+       wil6210_unmask_irq_rx(wil);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil_ioread32_and_clear(wil->csr +
+                                        HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                                        offsetof(struct RGF_ICR, ICR));
+
+       wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr);
+
+       if (!isr) {
+               wil_err(wil, "spurious IRQ: TX\n");
+               return IRQ_NONE;
+       }
+
+       wil6210_mask_irq_tx(wil);
+
+       if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+               uint i;
+               wil_dbg_IRQ(wil, "TX done\n");
+               isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+               for (i = 0; i < 24; i++) {
+                       u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
+                       if (isr & mask) {
+                               isr &= ~mask;
+                               wil_dbg_IRQ(wil, "TX done(%i)\n", i);
+                               wil_tx_complete(wil, i);
+                       }
+               }
+       }
+
+       if (isr)
+               wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+       wil6210_unmask_irq_tx(wil);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil_ioread32_and_clear(wil->csr +
+                                        HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                                        offsetof(struct RGF_ICR, ICR));
+
+       wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr);
+
+       if (!isr) {
+               wil_err(wil, "spurious IRQ: MISC\n");
+               return IRQ_NONE;
+       }
+
+       wil6210_mask_irq_misc(wil);
+
+       if (isr & ISR_MISC_FW_READY) {
+               wil_dbg_IRQ(wil, "IRQ: FW ready\n");
+               /**
+                * Actual FW ready indicated by the
+                * WMI_FW_READY_EVENTID
+                */
+               isr &= ~ISR_MISC_FW_READY;
+       }
+
+       wil->isr_misc = isr;
+
+       if (isr) {
+               return IRQ_WAKE_THREAD;
+       } else {
+               wil6210_unmask_irq_misc(wil);
+               return IRQ_HANDLED;
+       }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil->isr_misc;
+
+       wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+       if (isr & ISR_MISC_MBOX_EVT) {
+               wil_dbg_IRQ(wil, "MBOX event\n");
+               wmi_recv_cmd(wil);
+               isr &= ~ISR_MISC_MBOX_EVT;
+       }
+
+       if (isr)
+               wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+       wil->isr_misc = 0;
+
+       wil6210_unmask_irq_misc(wil);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+
+       wil_dbg_IRQ(wil, "Thread IRQ\n");
+       /* Discover real IRQ cause */
+       if (wil->isr_misc)
+               wil6210_irq_misc_thread(irq, cookie);
+
+       wil6210_unmask_irq_pseudo(wil);
+
+       return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is subtle bug in hardware that causes IRQ to raise when it should be
+ * masked. It is quite rare and hard to debug.
+ *
+ * Catch irq issue if it happens and print all I can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+       if (!test_bit(wil_status_irqen, &wil->status)) {
+               u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                               offsetof(struct RGF_ICR, ICM));
+               u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                               offsetof(struct RGF_ICR, ICR));
+               u32 imv_rx = ioread32(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                               offsetof(struct RGF_ICR, IMV));
+               u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                               offsetof(struct RGF_ICR, ICM));
+               u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                               offsetof(struct RGF_ICR, ICR));
+               u32 imv_tx = ioread32(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                               offsetof(struct RGF_ICR, IMV));
+               u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                               offsetof(struct RGF_ICR, ICM));
+               u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                               offsetof(struct RGF_ICR, ICR));
+               u32 imv_misc = ioread32(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                               offsetof(struct RGF_ICR, IMV));
+               wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+                               "Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+                               "Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+                               "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+                               pseudo_cause,
+                               icm_rx, icr_rx, imv_rx,
+                               icm_tx, icr_tx, imv_tx,
+                               icm_misc, icr_misc, imv_misc);
+
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+       irqreturn_t rc = IRQ_HANDLED;
+       struct wil6210_priv *wil = cookie;
+       u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+       /**
+        * pseudo_cause is Clear-On-Read, no need to ACK
+        */
+       if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+               return IRQ_NONE;
+
+       /* FIXME: IRQ mask debug */
+       if (wil6210_debug_irq_mask(wil, pseudo_cause))
+               return IRQ_NONE;
+
+       wil6210_mask_irq_pseudo(wil);
+
+       /* Discover real IRQ cause
+        * There are 2 possible phases for every IRQ:
+        * - hard IRQ handler called right here
+        * - threaded handler called later
+        *
+        * Hard IRQ handler reads and clears ISR.
+        *
+        * If threaded handler requested, hard IRQ handler
+        * returns IRQ_WAKE_THREAD and saves ISR register value
+        * for the threaded handler use.
+        *
+        * voting for wake thread - need at least 1 vote
+        */
+       if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+           (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+               rc = IRQ_WAKE_THREAD;
+
+       if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+           (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+               rc = IRQ_WAKE_THREAD;
+
+       if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+           (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+               rc = IRQ_WAKE_THREAD;
+
+       /* if thread is requested, it will unmask IRQ */
+       if (rc != IRQ_WAKE_THREAD)
+               wil6210_unmask_irq_pseudo(wil);
+
+       wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause);
+
+       return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+       int rc;
+       /*
+        * IRQ's are in the following order:
+        * - Tx
+        * - Rx
+        * - Misc
+        */
+
+       rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+                        WIL_NAME"_tx", wil);
+       if (rc)
+               return rc;
+
+       rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+                        WIL_NAME"_rx", wil);
+       if (rc)
+               goto free0;
+
+       rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+                                 wil6210_irq_misc_thread,
+                                 IRQF_SHARED, WIL_NAME"_misc", wil);
+       if (rc)
+               goto free1;
+
+       return 0;
+       /* error branch */
+free1:
+       free_irq(irq + 1, wil);
+free0:
+       free_irq(irq, wil);
+
+       return rc;
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+       int rc;
+       if (wil->n_msi == 3)
+               rc = wil6210_request_3msi(wil, irq);
+       else
+               rc = request_threaded_irq(irq, wil6210_hardirq,
+                                         wil6210_thread_irq,
+                                         wil->n_msi ? 0 : IRQF_SHARED,
+                                         WIL_NAME, wil);
+       if (rc)
+               return rc;
+
+       wil6210_enable_irq(wil);
+
+       return 0;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+       wil6210_disable_irq(wil);
+       free_irq(irq, wil);
+       if (wil->n_msi == 3) {
+               free_irq(irq + 1, wil);
+               free_irq(irq + 2, wil);
+       }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644 (file)
index 0000000..95fcd36
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+
+/*
+ * Due to a hardware issue,
+ * one has to read/write to/from NIC in 32-bit chunks;
+ * regular memcpy_fromio and siblings will
+ * not work on 64-bit platform - it uses 64-bit transactions
+ *
+ * Force 32-bit transactions to enable NIC on 64-bit platforms
+ *
+ * To avoid byte swap on big endian host, __raw_{read|write}l
+ * should be used - {read|write}l would swap bytes to provide
+ * little endian on PCI value in host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+                         size_t count)
+{
+       u32 *d = dst;
+       const volatile u32 __iomem *s = src;
+
+       /* size_t is unsigned, if (count%4 != 0) it will wrap */
+       for (count += 4; count > 4; count -= 4)
+               *d++ = __raw_readl(s++);
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+                       size_t count)
+{
+       volatile u32 __iomem *d = dst;
+       const u32 *s = src;
+
+       for (count += 4; count > 4; count -= 4)
+               __raw_writel(*s++, d++);
+}
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+       uint i;
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+
+       wil_dbg(wil, "%s()\n", __func__);
+
+       wil_link_off(wil);
+       clear_bit(wil_status_fwconnected, &wil->status);
+
+       switch (wdev->sme_state) {
+       case CFG80211_SME_CONNECTED:
+               cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                     NULL, 0, GFP_KERNEL);
+               break;
+       case CFG80211_SME_CONNECTING:
+               cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+                                       WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                       GFP_KERNEL);
+               break;
+       default:
+               ;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
+               wil_vring_fini_tx(wil, i);
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+       struct wil6210_priv *wil = container_of(work,
+                       struct wil6210_priv, disconnect_worker);
+
+       _wil6210_disconnect(wil, NULL);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+       struct wil6210_priv *wil = (void *)x;
+
+       wil_dbg(wil, "Connect timeout\n");
+
+       /* reschedule to thread context - disconnect won't
+        * run from atomic context
+        */
+       schedule_work(&wil->disconnect_worker);
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+       wil_dbg(wil, "%s()\n", __func__);
+
+       mutex_init(&wil->mutex);
+       mutex_init(&wil->wmi_mutex);
+
+       init_completion(&wil->wmi_ready);
+
+       wil->pending_connect_cid = -1;
+       setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+
+       INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+       INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+       INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+
+       INIT_LIST_HEAD(&wil->pending_wmi_ev);
+       spin_lock_init(&wil->wmi_ev_lock);
+
+       wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+       if (!wil->wmi_wq)
+               return -EAGAIN;
+
+       wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
+       if (!wil->wmi_wq_conn) {
+               destroy_workqueue(wil->wmi_wq);
+               return -EAGAIN;
+       }
+
+       /* make shadow copy of registers that should not change on run time */
+       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+                            sizeof(struct wil6210_mbox_ctl));
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+       return 0;
+}
+
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+       del_timer_sync(&wil->connect_timer);
+       _wil6210_disconnect(wil, bssid);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+       cancel_work_sync(&wil->disconnect_worker);
+       wil6210_disconnect(wil, NULL);
+       wmi_event_flush(wil);
+       destroy_workqueue(wil->wmi_wq_conn);
+       destroy_workqueue(wil->wmi_wq);
+}
+
+static void wil_target_reset(struct wil6210_priv *wil)
+{
+       wil_dbg(wil, "Resetting...\n");
+
+       /* register write */
+#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
+       /* register set = read, OR, write */
+#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
+               wil->csr + HOSTADDR(a))
+
+       /* hpal_perst_from_pad_src_n_mask */
+       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
+       /* car_perst_rst_src_n_mask */
+       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+
+       W(RGF_USER_MAC_CPU_0,  BIT(1)); /* mac_cpu_man_rst */
+       W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+
+       msleep(100);
+
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+
+       msleep(100);
+
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+       msleep(2000);
+
+       W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
+
+       msleep(2000);
+
+       wil_dbg(wil, "Reset completed\n");
+
+#undef W
+#undef S
+}
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+       le32_to_cpus(&r->base);
+       le16_to_cpus(&r->entry_size);
+       le16_to_cpus(&r->size);
+       le32_to_cpus(&r->tail);
+       le32_to_cpus(&r->head);
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+       ulong to = msecs_to_jiffies(1000);
+       ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+       if (0 == left) {
+               wil_err(wil, "Firmware not ready\n");
+               return -ETIME;
+       } else {
+               wil_dbg(wil, "FW ready after %d ms\n",
+                       jiffies_to_msecs(to-left));
+       }
+       return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil)
+{
+       int rc;
+
+       cancel_work_sync(&wil->disconnect_worker);
+       wil6210_disconnect(wil, NULL);
+
+       wmi_event_flush(wil);
+
+       flush_workqueue(wil->wmi_wq);
+       flush_workqueue(wil->wmi_wq_conn);
+
+       wil6210_disable_irq(wil);
+       wil->status = 0;
+
+       /* TODO: put MAC in reset */
+       wil_target_reset(wil);
+
+       /* init after reset */
+       wil->pending_connect_cid = -1;
+       INIT_COMPLETION(wil->wmi_ready);
+
+       /* make shadow copy of registers that should not change on run time */
+       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+                            sizeof(struct wil6210_mbox_ctl));
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+       /* TODO: release MAC reset */
+       wil6210_enable_irq(wil);
+
+       /* we just started MAC, wait for FW ready */
+       rc = wil_wait_for_fw_ready(wil);
+
+       return rc;
+}
+
+
+void wil_link_on(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg(wil, "%s()\n", __func__);
+
+       netif_carrier_on(ndev);
+       netif_tx_wake_all_queues(ndev);
+}
+
+void wil_link_off(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg(wil, "%s()\n", __func__);
+
+       netif_tx_stop_all_queues(ndev);
+       netif_carrier_off(ndev);
+}
+
+static int __wil_up(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct ieee80211_channel *channel = wdev->preset_chandef.chan;
+       int rc;
+       int bi;
+       u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       rc = wil_reset(wil);
+       if (rc)
+               return rc;
+
+       /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
+       wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_STATION:
+               wil_dbg(wil, "type: STATION\n");
+               bi = 0;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_AP:
+               wil_dbg(wil, "type: AP\n");
+               bi = 100;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               wil_dbg(wil, "type: P2P_CLIENT\n");
+               bi = 0;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_P2P_GO:
+               wil_dbg(wil, "type: P2P_GO\n");
+               bi = 100;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               wil_dbg(wil, "type: Monitor\n");
+               bi = 0;
+               ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+               /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* Apply profile in the following order: */
+       /* SSID and channel for the AP */
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               if (wdev->ssid_len == 0) {
+                       wil_err(wil, "SSID not set\n");
+                       return -EINVAL;
+               }
+               wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+               if (channel)
+                       wmi_set_channel(wil, channel->hw_value);
+               break;
+       default:
+               ;
+       }
+
+       /* MAC address - pre-requisite for other commands */
+       wmi_set_mac_address(wil, ndev->dev_addr);
+
+       /* Set up beaconing if required. */
+       rc = wmi_set_bcon(wil, bi, wmi_nettype);
+       if (rc)
+               return rc;
+
+       /* Rx VRING. After MAC and beacon */
+       wil_rx_init(wil);
+
+       return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+       int rc;
+
+       mutex_lock(&wil->mutex);
+       rc = __wil_up(wil);
+       mutex_unlock(&wil->mutex);
+
+       return rc;
+}
+
+static int __wil_down(struct wil6210_priv *wil)
+{
+       if (wil->scan_request) {
+               cfg80211_scan_done(wil->scan_request, true);
+               wil->scan_request = NULL;
+       }
+
+       wil6210_disconnect(wil, NULL);
+       wil_rx_fini(wil);
+
+       return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+       int rc;
+
+       mutex_lock(&wil->mutex);
+       rc = __wil_down(wil);
+       mutex_unlock(&wil->mutex);
+
+       return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644 (file)
index 0000000..3068b5c
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+
+#include "wil6210.h"
+
+static int wil_open(struct net_device *ndev)
+{
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+       return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+       return wil_down(wil);
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 3
+ * AC_VI -> queue 2
+ * AC_BE -> queue 1
+ * AC_BK -> queue 0
+ */
+static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb)
+{
+       static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+       u16 rc;
+
+       skb->priority = cfg80211_classify8021d(skb);
+
+       rc = wil_1d_to_queue[skb->priority];
+
+       wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority,
+                    (int)rc);
+
+       return rc;
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+       .ndo_open               = wil_open,
+       .ndo_stop               = wil_stop,
+       .ndo_start_xmit         = wil_start_xmit,
+       .ndo_select_queue       = wil_select_queue,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+       struct net_device *ndev;
+       struct wireless_dev *wdev;
+       struct wil6210_priv *wil;
+       struct ieee80211_channel *ch;
+       int rc = 0;
+
+       wdev = wil_cfg80211_init(dev);
+       if (IS_ERR(wdev)) {
+               dev_err(dev, "wil_cfg80211_init failed\n");
+               return wdev;
+       }
+
+       wil = wdev_to_wil(wdev);
+       wil->csr = csr;
+       wil->wdev = wdev;
+
+       rc = wil_priv_init(wil);
+       if (rc) {
+               dev_err(dev, "wil_priv_init failed\n");
+               goto out_wdev;
+       }
+
+       wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+       /* default monitor channel */
+       ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+       cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+       ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1);
+       if (!ndev) {
+               dev_err(dev, "alloc_netdev_mqs failed\n");
+               rc = -ENOMEM;
+               goto out_priv;
+       }
+
+       ndev->netdev_ops = &wil_netdev_ops;
+       ndev->ieee80211_ptr = wdev;
+       SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+       wdev->netdev = ndev;
+
+       wil_link_off(wil);
+
+       return wil;
+
+ out_priv:
+       wil_priv_deinit(wil);
+
+ out_wdev:
+       wil_wdev_free(wil);
+
+       return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       if (!ndev)
+               return;
+
+       free_netdev(ndev);
+       wil_priv_deinit(wil);
+       wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       int rc;
+
+       rc = register_netdev(ndev);
+       if (rc < 0) {
+               dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+               return rc;
+       }
+
+       wil_link_off(wil);
+
+       return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644 (file)
index 0000000..0fc83ed
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+                " Use MSI interrupt: "
+                "0 - don't, 1 - (default) - single, or 3");
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+       struct pci_dev *pdev = wil->pdev;
+       int rc;
+
+       pci_set_master(pdev);
+
+       /*
+        * how many MSI interrupts to request?
+        */
+       switch (use_msi) {
+       case 3:
+       case 1:
+       case 0:
+               break;
+       default:
+               wil_err(wil, "Invalid use_msi=%d, default to 1\n",
+                       use_msi);
+               use_msi = 1;
+       }
+       wil->n_msi = use_msi;
+       if (wil->n_msi) {
+               wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi);
+               rc = pci_enable_msi_block(pdev, wil->n_msi);
+               if (rc && (wil->n_msi == 3)) {
+                       wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+                       wil->n_msi = 1;
+                       rc = pci_enable_msi_block(pdev, wil->n_msi);
+               }
+               if (rc) {
+                       wil_err(wil, "pci_enable_msi failed, use INTx\n");
+                       wil->n_msi = 0;
+               }
+       } else {
+               wil_dbg(wil, "MSI interrupts disabled, use INTx\n");
+       }
+
+       rc = wil6210_init_irq(wil, pdev->irq);
+       if (rc)
+               goto stop_master;
+
+       /* need reset here to obtain MAC */
+       rc = wil_reset(wil);
+       if (rc)
+               goto release_irq;
+
+       return 0;
+
+ release_irq:
+       wil6210_fini_irq(wil, pdev->irq);
+       /* safe to call if no MSI */
+       pci_disable_msi(pdev);
+ stop_master:
+       pci_clear_master(pdev);
+       return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+       struct pci_dev *pdev = wil->pdev;
+
+       pci_clear_master(pdev);
+       /* disable and release IRQ */
+       wil6210_fini_irq(wil, pdev->irq);
+       /* safe to call if no MSI */
+       pci_disable_msi(pdev);
+       /* TODO: disable HW */
+
+       return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct wil6210_priv *wil;
+       struct device *dev = &pdev->dev;
+       void __iomem *csr;
+       int rc;
+
+       /* check HW */
+       dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+                (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+       if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+               dev_err(&pdev->dev, "Not " WIL_NAME "? "
+                       "BAR0 size is %lu while expecting %lu\n",
+                       (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+               return -ENODEV;
+       }
+
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               return -ENODEV;
+       }
+       /* rollback to err_disable_pdev */
+
+       rc = pci_request_region(pdev, 0, WIL_NAME);
+       if (rc) {
+               dev_err(&pdev->dev, "pci_request_region failed\n");
+               goto err_disable_pdev;
+       }
+       /* rollback to err_release_reg */
+
+       csr = pci_ioremap_bar(pdev, 0);
+       if (!csr) {
+               dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+               rc = -ENODEV;
+               goto err_release_reg;
+       }
+       /* rollback to err_iounmap */
+       dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+
+       wil = wil_if_alloc(dev, csr);
+       if (IS_ERR(wil)) {
+               rc = (int)PTR_ERR(wil);
+               dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+               goto err_iounmap;
+       }
+       /* rollback to if_free */
+
+       pci_set_drvdata(pdev, wil);
+       wil->pdev = pdev;
+
+       /* FW should raise IRQ when ready */
+       rc = wil_if_pcie_enable(wil);
+       if (rc) {
+               wil_err(wil, "Enable device failed\n");
+               goto if_free;
+       }
+       /* rollback to bus_disable */
+
+       rc = wil_if_add(wil);
+       if (rc) {
+               wil_err(wil, "wil_if_add failed: %d\n", rc);
+               goto bus_disable;
+       }
+
+       wil6210_debugfs_init(wil);
+
+       /* check FW is alive */
+       wmi_echo(wil);
+
+       return 0;
+
+ bus_disable:
+       wil_if_pcie_disable(wil);
+ if_free:
+       wil_if_free(wil);
+ err_iounmap:
+       pci_iounmap(pdev, csr);
+ err_release_reg:
+       pci_release_region(pdev, 0);
+ err_disable_pdev:
+       pci_disable_device(pdev);
+
+       return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil6210_debugfs_remove(wil);
+       wil_if_pcie_disable(wil);
+       wil_if_remove(wil);
+       wil_if_free(wil);
+       pci_iounmap(pdev, wil->csr);
+       pci_release_region(pdev, 0);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
+       { PCI_DEVICE(0x1ae9, 0x0301) },
+       { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+       .probe          = wil_pcie_probe,
+       .remove         = wil_pcie_remove,
+       .id_table       = wil6210_pcie_ids,
+       .name           = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644 (file)
index 0000000..f29c294
--- /dev/null
@@ -0,0 +1,871 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+                " Include PHY info in the radiotap header, default - no");
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+       return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+       return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+       vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+       return wil_vring_next_tail(vring) == vring->swhead;
+}
+/*
+ * Available space in Tx Vring
+ */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+       u32 swhead = vring->swhead;
+       u32 swtail = vring->swtail;
+       int used = (vring->size + swhead - swtail) % vring->size;
+
+       return vring->size - used - 1;
+}
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+       struct device *dev = wil_to_dev(wil);
+       size_t sz = vring->size * sizeof(vring->va[0]);
+       uint i;
+
+       BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+       vring->swhead = 0;
+       vring->swtail = 0;
+       vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+       if (!vring->ctx) {
+               wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n",
+                       vring->size);
+               vring->va = NULL;
+               return -ENOMEM;
+       }
+       /*
+        * vring->va should be aligned on its size rounded up to power of 2
+        * This is granted by the dma_alloc_coherent
+        */
+       vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+       if (!vring->va) {
+               wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
+                       vring->size);
+               kfree(vring->ctx);
+               vring->ctx = NULL;
+               return -ENOMEM;
+       }
+       /* initially, all descriptors are SW owned
+        * For Tx and Rx, ownership bit is at the same location, thus
+        * we can use any
+        */
+       for (i = 0; i < vring->size; i++) {
+               volatile struct vring_tx_desc *d = &(vring->va[i].tx);
+               d->dma.status = TX_DMA_STATUS_DU;
+       }
+
+       wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+               vring->va, (unsigned long long)vring->pa, vring->ctx);
+
+       return 0;
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+                          int tx)
+{
+       struct device *dev = wil_to_dev(wil);
+       size_t sz = vring->size * sizeof(vring->va[0]);
+
+       while (!wil_vring_is_empty(vring)) {
+               if (tx) {
+                       volatile struct vring_tx_desc *d =
+                                       &vring->va[vring->swtail].tx;
+                       dma_addr_t pa = d->dma.addr_low |
+                                       ((u64)d->dma.addr_high << 32);
+                       struct sk_buff *skb = vring->ctx[vring->swtail];
+                       if (skb) {
+                               dma_unmap_single(dev, pa, d->dma.length,
+                                                DMA_TO_DEVICE);
+                               dev_kfree_skb_any(skb);
+                               vring->ctx[vring->swtail] = NULL;
+                       } else {
+                               dma_unmap_page(dev, pa, d->dma.length,
+                                              DMA_TO_DEVICE);
+                       }
+                       vring->swtail = wil_vring_next_tail(vring);
+               } else { /* rx */
+                       volatile struct vring_rx_desc *d =
+                                       &vring->va[vring->swtail].rx;
+                       dma_addr_t pa = d->dma.addr_low |
+                                       ((u64)d->dma.addr_high << 32);
+                       struct sk_buff *skb = vring->ctx[vring->swhead];
+                       dma_unmap_single(dev, pa, d->dma.length,
+                                        DMA_FROM_DEVICE);
+                       kfree_skb(skb);
+                       wil_vring_advance_head(vring, 1);
+               }
+       }
+       dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+       kfree(vring->ctx);
+       vring->pa = 0;
+       vring->va = NULL;
+       vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+                              u32 i, int headroom)
+{
+       struct device *dev = wil_to_dev(wil);
+       unsigned int sz = RX_BUF_LEN;
+       volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+       dma_addr_t pa;
+
+       /* TODO align */
+       struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       skb_reserve(skb, headroom);
+       skb_put(skb, sz);
+
+       pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, pa))) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+       d->dma.addr_low = lower_32_bits(pa);
+       d->dma.addr_high = (u16)upper_32_bits(pa);
+       /* ip_length don't care */
+       /* b11 don't care */
+       /* error don't care */
+       d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+       d->dma.length = sz;
+       vring->ctx[i] = skb;
+
+       return 0;
+}
+
+/**
+ * Adds radiotap header
+ *
+ * Any error indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ *  - Rx descriptor: 32 bytes
+ *  - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+                                      struct sk_buff *skb,
+                                      volatile struct vring_rx_desc *d)
+{
+       struct wireless_dev *wdev = wil->wdev;
+       struct wil6210_rtap {
+               struct ieee80211_radiotap_header rthdr;
+               /* fields should be in the order of bits in rthdr.it_present */
+               /* flags */
+               u8 flags;
+               /* channel */
+               __le16 chnl_freq __aligned(2);
+               __le16 chnl_flags;
+               /* MCS */
+               u8 mcs_present;
+               u8 mcs_flags;
+               u8 mcs_index;
+       } __packed;
+       struct wil6210_rtap_vendor {
+               struct wil6210_rtap rtap;
+               /* vendor */
+               u8 vendor_oui[3] __aligned(2);
+               u8 vendor_ns;
+               __le16 vendor_skip;
+               u8 vendor_data[0];
+       } __packed;
+       struct wil6210_rtap_vendor *rtap_vendor;
+       int rtap_len = sizeof(struct wil6210_rtap);
+       int phy_length = 0; /* phy info header size, bytes */
+       static char phy_data[128];
+       struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+       if (rtap_include_phy_info) {
+               rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+               /* calculate additional length */
+               if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+                       /**
+                        * PHY info starts from 8-byte boundary
+                        * there are 8-byte lines, last line may be partially
+                        * written (HW bug), thus FW configures for last line
+                        * to be excessive. Driver skips this last line.
+                        */
+                       int len = min_t(int, 8 + sizeof(phy_data),
+                                       wil_rxdesc_phy_length(d));
+                       if (len > 8) {
+                               void *p = skb_tail_pointer(skb);
+                               void *pa = PTR_ALIGN(p, 8);
+                               if (skb_tailroom(skb) >= len + (pa - p)) {
+                                       phy_length = len - 8;
+                                       memcpy(phy_data, pa, phy_length);
+                               }
+                       }
+               }
+               rtap_len += phy_length;
+       }
+
+       if (skb_headroom(skb) < rtap_len &&
+           pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+               wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+               return;
+       }
+
+       rtap_vendor = (void *)skb_push(skb, rtap_len);
+       memset(rtap_vendor, 0, rtap_len);
+
+       rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+       rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+       rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+                       (1 << IEEE80211_RADIOTAP_FLAGS) |
+                       (1 << IEEE80211_RADIOTAP_CHANNEL) |
+                       (1 << IEEE80211_RADIOTAP_MCS));
+       if (d->dma.status & RX_DMA_STATUS_ERROR)
+               rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+       rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+       rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+       rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+       rtap_vendor->rtap.mcs_flags = 0;
+       rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+       if (rtap_include_phy_info) {
+               rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+                               IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+               /* OUI for Wilocity 04:ce:14 */
+               rtap_vendor->vendor_oui[0] = 0x04;
+               rtap_vendor->vendor_oui[1] = 0xce;
+               rtap_vendor->vendor_oui[2] = 0x14;
+               rtap_vendor->vendor_ns = 1;
+               /* Rx descriptor + PHY data  */
+               rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+                                                      phy_length);
+               memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+               memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+                      phy_length);
+       }
+}
+
+/*
+ * Fast swap in place between 2 registers
+ */
+static void wil_swap_u16(u16 *a, u16 *b)
+{
+       *a ^= *b;
+       *b ^= *a;
+       *a ^= *b;
+}
+
+static void wil_swap_ethaddr(void *data)
+{
+       struct ethhdr *eth = data;
+       u16 *s = (u16 *)eth->h_source;
+       u16 *d = (u16 *)eth->h_dest;
+
+       wil_swap_u16(s++, d++);
+       wil_swap_u16(s++, d++);
+       wil_swap_u16(s, d);
+}
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+                                        struct vring *vring)
+{
+       struct device *dev = wil_to_dev(wil);
+       struct net_device *ndev = wil_to_ndev(wil);
+       volatile struct vring_rx_desc *d;
+       struct sk_buff *skb;
+       dma_addr_t pa;
+       unsigned int sz = RX_BUF_LEN;
+       u8 ftype;
+       u8 ds_bits;
+
+       if (wil_vring_is_empty(vring))
+               return NULL;
+
+       d = &(vring->va[vring->swhead].rx);
+       if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+               /* it is not error, we just reached end of Rx done area */
+               return NULL;
+       }
+
+       pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+       skb = vring->ctx[vring->swhead];
+       dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+       skb_trim(skb, d->dma.length);
+
+       wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+
+       /* use radiotap header only if required */
+       if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+               wil_rx_add_radiotap_header(wil, skb, d);
+
+       wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+       wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4,
+                         (const void *)d, sizeof(*d), false);
+
+       wil_vring_advance_head(vring, 1);
+
+       /* no extra checks if in sniffer mode */
+       if (ndev->type != ARPHRD_ETHER)
+               return skb;
+       /*
+        * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
+        * Driver should recognize it by frame type, that is found
+        * in Rx descriptor. If type is not data, it is 802.11 frame as is
+        */
+       ftype = wil_rxdesc_ftype(d) << 2;
+       if (ftype != IEEE80211_FTYPE_DATA) {
+               wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype);
+               /* TODO: process it */
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       if (skb->len < ETH_HLEN) {
+               wil_err(wil, "Short frame, len = %d\n", skb->len);
+               /* TODO: process it (i.e. BAR) */
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       ds_bits = wil_rxdesc_ds_bits(d);
+       if (ds_bits == 1) {
+               /*
+                * HW bug - in ToDS mode, i.e. Rx on AP side,
+                * addresses get swapped
+                */
+               wil_swap_ethaddr(skb->data);
+       }
+
+       return skb;
+}
+
+/**
+ * allocate and fill up to @count buffers in rx ring
+ * buffers posted at @swtail
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct vring *v = &wil->vring_rx;
+       u32 next_tail;
+       int rc = 0;
+       int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+                       WIL6210_RTAP_SIZE : 0;
+
+       for (; next_tail = wil_vring_next_tail(v),
+                       (next_tail != v->swhead) && (count-- > 0);
+                       v->swtail = next_tail) {
+               rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+               if (rc) {
+                       wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+                               rc, v->swtail);
+                       break;
+               }
+       }
+       iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+       return rc;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ */
+static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+       int rc;
+       unsigned int len = skb->len;
+
+       if (in_interrupt())
+               rc = netif_rx(skb);
+       else
+               rc = netif_rx_ni(skb);
+
+       if (likely(rc == NET_RX_SUCCESS)) {
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
+
+       } else {
+               ndev->stats.rx_dropped++;
+       }
+}
+
+/**
+ * Proceed all completed skb's from Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_rx_handle(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct vring *v = &wil->vring_rx;
+       struct sk_buff *skb;
+
+       if (!v->va) {
+               wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+               return;
+       }
+       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
+               wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+                                 skb->data, skb_headlen(skb), false);
+
+               skb_orphan(skb);
+
+               if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+                       skb->dev = ndev;
+                       skb_reset_mac_header(skb);
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       skb->pkt_type = PACKET_OTHERHOST;
+                       skb->protocol = htons(ETH_P_802_2);
+
+               } else {
+                       skb->protocol = eth_type_trans(skb, ndev);
+               }
+
+               wil_netif_rx_any(skb, ndev);
+       }
+       wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct vring *vring = &wil->vring_rx;
+       int rc;
+       struct wmi_cfg_rx_chain_cmd cmd = {
+               .action = WMI_RX_CHAIN_ADD,
+               .rx_sw_ring = {
+                       .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+               },
+               .mid = 0, /* TODO - what is it? */
+               .decap_trans_type = WMI_DECAP_TYPE_802_3,
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cfg_rx_chain_done_event evt;
+       } __packed evt;
+
+       vring->size = WIL6210_RX_RING_SIZE;
+       rc = wil_vring_alloc(wil, vring);
+       if (rc)
+               return rc;
+
+       cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+       cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size);
+       if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+               struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+               cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+               if (ch)
+                       cmd.sniffer_cfg.channel = ch->hw_value - 1;
+               cmd.sniffer_cfg.phy_info_mode =
+                       cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+               cmd.sniffer_cfg.phy_support =
+                       cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+                                   ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+       }
+       /* typical time for secure PCP is 840ms */
+       rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+                     WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+       if (rc)
+               goto err_free;
+
+       vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+       wil_dbg(wil, "Rx init: status %d tail 0x%08x\n",
+               le32_to_cpu(evt.evt.status), vring->hwtail);
+
+       rc = wil_rx_refill(wil, vring->size);
+       if (rc)
+               goto err_free;
+
+       return 0;
+ err_free:
+       wil_vring_free(wil, vring, 0);
+
+       return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+       struct vring *vring = &wil->vring_rx;
+
+       if (vring->va) {
+               int rc;
+               struct wmi_cfg_rx_chain_cmd cmd = {
+                       .action = cpu_to_le32(WMI_RX_CHAIN_DEL),
+                       .rx_sw_ring = {
+                               .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+                       },
+               };
+               struct {
+                       struct wil6210_mbox_hdr_wmi wmi;
+                       struct wmi_cfg_rx_chain_done_event cfg;
+               } __packed wmi_rx_cfg_reply;
+
+               rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+                             WMI_CFG_RX_CHAIN_DONE_EVENTID,
+                             &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply),
+                             100);
+               wil_vring_free(wil, vring, 0);
+       }
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+                     int cid, int tid)
+{
+       int rc;
+       struct wmi_vring_cfg_cmd cmd = {
+               .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+               .vring_cfg = {
+                       .tx_sw_ring = {
+                               .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+                       },
+                       .ringid = id,
+                       .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+                       .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+                       .mac_ctrl = 0,
+                       .to_resolution = 0,
+                       .agg_max_wsize = 16,
+                       .schd_params = {
+                               .priority = cpu_to_le16(0),
+                               .timeslot_us = cpu_to_le16(0xfff),
+                       },
+               },
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_vring_cfg_done_event cmd;
+       } __packed reply;
+       struct vring *vring = &wil->vring_tx[id];
+
+       if (vring->va) {
+               wil_err(wil, "Tx ring [%d] already allocated\n", id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       vring->size = size;
+       rc = wil_vring_alloc(wil, vring);
+       if (rc)
+               goto out;
+
+       cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+       cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
+
+       rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+                     WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+       if (rc)
+               goto out_free;
+
+       if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+               wil_err(wil, "Tx config failed, status 0x%02x\n",
+                       reply.cmd.status);
+               goto out_free;
+       }
+       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+       return 0;
+ out_free:
+       wil_vring_free(wil, vring, 1);
+ out:
+
+       return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+       struct vring *vring = &wil->vring_tx[id];
+
+       if (!vring->va)
+               return;
+
+       wil_vring_free(wil, vring, 1);
+}
+
+static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+                                      struct sk_buff *skb)
+{
+       struct vring *v = &wil->vring_tx[0];
+
+       if (v->va)
+               return v;
+
+       return NULL;
+}
+
+static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
+                          dma_addr_t pa, u32 len)
+{
+       d->dma.addr_low = lower_32_bits(pa);
+       d->dma.addr_high = (u16)upper_32_bits(pa);
+       d->dma.ip_length = 0;
+       /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+       d->dma.b11 = 0/*14 | BIT(7)*/;
+       d->dma.error = 0;
+       d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+       d->dma.length = len;
+       d->dma.d0 = 0;
+       d->mac.d[0] = 0;
+       d->mac.d[1] = 0;
+       d->mac.d[2] = 0;
+       d->mac.ucode_cmd = 0;
+       /* use dst index 0 */
+       d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
+                      (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
+       /* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
+       d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+                     (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+       return 0;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+                       struct sk_buff *skb)
+{
+       struct device *dev = wil_to_dev(wil);
+       volatile struct vring_tx_desc *d;
+       u32 swhead = vring->swhead;
+       int avail = wil_vring_avail_tx(vring);
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       uint f;
+       int vring_index = vring - wil->vring_tx;
+       uint i = swhead;
+       dma_addr_t pa;
+
+       wil_dbg_TXRX(wil, "%s()\n", __func__);
+
+       if (avail < vring->size/8)
+               netif_tx_stop_all_queues(wil_to_ndev(wil));
+       if (avail < 1 + nr_frags) {
+               wil_err(wil, "Tx ring full. No space for %d fragments\n",
+                       1 + nr_frags);
+               return -ENOMEM;
+       }
+       d = &(vring->va[i].tx);
+
+       /* FIXME FW can accept only unicast frames for the peer */
+       memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
+
+       pa = dma_map_single(dev, skb->data,
+                       skb_headlen(skb), DMA_TO_DEVICE);
+
+       wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+                    skb->data, (unsigned long long)pa);
+       wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+                         skb->data, skb_headlen(skb), false);
+
+       if (unlikely(dma_mapping_error(dev, pa)))
+               return -EINVAL;
+       /* 1-st segment */
+       wil_tx_desc_map(d, pa, skb_headlen(skb));
+       d->mac.d[2] |= ((nr_frags + 1) <<
+                      MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+       /* middle segments */
+       for (f = 0; f < nr_frags; f++) {
+               const struct skb_frag_struct *frag =
+                               &skb_shinfo(skb)->frags[f];
+               int len = skb_frag_size(frag);
+               i = (swhead + f + 1) % vring->size;
+               d = &(vring->va[i].tx);
+               pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+                               DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, pa)))
+                       goto dma_error;
+               wil_tx_desc_map(d, pa, len);
+               vring->ctx[i] = NULL;
+       }
+       /* for the last seg only */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+       d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+       d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+
+       wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4,
+                         (const void *)d, sizeof(*d), false);
+
+       /* advance swhead */
+       wil_vring_advance_head(vring, nr_frags + 1);
+       wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+       iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+       /* hold reference to skb
+        * to prevent skb release before accounting
+        * in case of immediate "tx done"
+        */
+       vring->ctx[i] = skb_get(skb);
+
+       return 0;
+ dma_error:
+       /* unmap what we have mapped */
+       /* Note: increment @f to operate with positive index */
+       for (f++; f > 0; f--) {
+               i = (swhead + f) % vring->size;
+               d = &(vring->va[i].tx);
+               d->dma.status = TX_DMA_STATUS_DU;
+               pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+               if (vring->ctx[i])
+                       dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+       }
+
+       return -EINVAL;
+}
+
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+       struct vring *vring;
+       int rc;
+
+       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       if (!test_bit(wil_status_fwready, &wil->status)) {
+               wil_err(wil, "FW not ready\n");
+               goto drop;
+       }
+       if (!test_bit(wil_status_fwconnected, &wil->status)) {
+               wil_err(wil, "FW not connected\n");
+               goto drop;
+       }
+       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+               wil_err(wil, "Xmit in monitor mode not supported\n");
+               goto drop;
+       }
+       if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+               rc = wmi_tx_eapol(wil, skb);
+       } else {
+               /* find vring */
+               vring = wil_find_tx_vring(wil, skb);
+               if (!vring) {
+                       wil_err(wil, "No Tx VRING available\n");
+                       goto drop;
+               }
+               /* set up vring entry */
+               rc = wil_tx_vring(wil, vring, skb);
+       }
+       switch (rc) {
+       case 0:
+               ndev->stats.tx_packets++;
+               ndev->stats.tx_bytes += skb->len;
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       case -ENOMEM:
+               return NETDEV_TX_BUSY;
+       default:
+               ; /* goto drop; */
+               break;
+       }
+ drop:
+       netif_tx_stop_all_queues(ndev);
+       ndev->stats.tx_dropped++;
+       dev_kfree_skb_any(skb);
+
+       return NET_XMIT_DROP;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+       struct device *dev = wil_to_dev(wil);
+       struct vring *vring = &wil->vring_tx[ringid];
+
+       if (!vring->va) {
+               wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+               return;
+       }
+
+       wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid);
+
+       while (!wil_vring_is_empty(vring)) {
+               volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
+               dma_addr_t pa;
+               struct sk_buff *skb;
+               if (!(d->dma.status & TX_DMA_STATUS_DU))
+                       break;
+
+               wil_dbg_TXRX(wil,
+                            "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+                            vring->swtail, d->dma.length, d->dma.status,
+                            d->dma.error);
+               wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4,
+                                 (const void *)d, sizeof(*d), false);
+
+               pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+               skb = vring->ctx[vring->swtail];
+               if (skb) {
+                       dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+                       dev_kfree_skb_any(skb);
+                       vring->ctx[vring->swtail] = NULL;
+               } else {
+                       dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+               }
+               d->dma.addr_low = 0;
+               d->dma.addr_high = 0;
+               d->dma.length = 0;
+               d->dma.status = TX_DMA_STATUS_DU;
+               vring->swtail = wil_vring_next_tail(vring);
+       }
+       if (wil_vring_avail_tx(vring) > vring->size/4)
+               netif_tx_wake_all_queues(wil_to_ndev(wil));
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644 (file)
index 0000000..45a61f5
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED    (1)
+#define BUF_HW_OWNED    (0)
+
+/* size of max. Rx packet */
+#define RX_BUF_LEN      (2048)
+#define TX_BUF_LEN      (2048)
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+/*
+ * Tx descriptor - MAC part
+ * [dword 0]
+ * bit  0.. 9 : lifetime_expiry_value:10
+ * bit     10 : interrup_en:1
+ * bit     11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit     14 : timestamp_insertion:1
+ * bit     15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit     27 : mcs_en:1
+ * bit 28..29 : reserved1:2
+ * bit     30 : reserved2:1
+ * bit     31 : sn_preserved:1
+ * [dword 1]
+ * bit  0.. 3 : pkt_mode:4
+ * bit      4 : pkt_mode_en:1
+ * bit  5.. 7 : reserved0:3
+ * bit  8..13 : reserved1:6
+ * bit     14 : reserved2:1
+ * bit     15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit     20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit     23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit     31 : max_retry_en:1
+ * [dword 2]
+ * bit  0.. 7 : num_of_descriptors:8
+ * bit  8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2
+ * bit     20 : snap_hdr_insertion_en:1
+ * bit     21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit  0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+       u32 d[3];
+       u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+
+
+#define TX_DMA_STATUS_DU         BIT(0)
+
+struct vring_tx_dma {
+       u32 d0;
+       u32 addr_low;
+       u16 addr_high;
+       u8  ip_length;
+       u8  b11;       /* 0..6: mac_length; 7:ip_version */
+       u8  error;     /* 0..2: err; 3..7: reserved; */
+       u8  status;    /* 0: used; 1..7; reserved */
+       u16 length;
+} __packed;
+
+/*
+ * Rx descriptor - MAC part
+ * [dword 0]
+ * bit  0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit  4.. 6 : connection_id:3 :The Source index that  was found during
+ *  Parsing the TA.  This field is used to  define the source of the packet
+ * bit      7 : reserved:1
+ * bit  8.. 9 : mac_id:2 : The MAC virtual  Ring number (always zero)
+ * bit 10..11 : frame_type:2 : The FC Control  (b3-2) -  MPDU Type
+ *              (management, data, control  and extension)
+ * bit 12..15 : frame_subtype:4 : The FC Control  (b7-4) -  Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit  0.. 3 : reserved
+ * bit  4.. 5 : key_id:2
+ * bit      6 : decrypt_bypass:1
+ * bit      7 : security:1
+ * bit  8.. 9 : ds_bits:2
+ * bit     10 : a_msdu_present:1  from qos header
+ * bit     11 : a_msdu_type:1  from qos header
+ * bit     12 : a_mpdu:1  part of AMPDU aggregation
+ * bit     13 : broadcast:1
+ * bit     14 : mutlicast:1
+ * bit     15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5   The Queue Identifier that the packet
+ *                             is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit  0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit      3 : fc_protocol_ver:1 The FC Control  (b0) - Protocol  Version
+ * bit      4 : fc_order:1 The FC Control (b15) -Order
+ * bit  5.. 7 : qos_ack_policy:3  The QoS (b6-5) ack policy Field
+ * bit      8 : esop:1 The QoS (b4) ESOP field
+ * bit      9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG  field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved  field
+ * bit     15 : qos_ac_constraint:1
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit  0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+       u32 d0;
+       u32 d1;
+       u16 w4;
+       u16 pn_15_0;
+       u32 pn_47_16;
+} __packed;
+
+/*
+ * Rx descriptor - DMA part
+ * [dword 0]
+ * bit  0.. 7 : l4_length:8 layer 4 length
+ * bit  8.. 9 : reserved:2
+ * bit     10 : cmd_dma_it:1
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * [dword 1]
+ * bit  0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit  0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8
+ * bit 24..30 : mac_length:7
+ * bit     31 : ip_version:1
+ * [dword 3]
+ *  [byte 12] error
+ *  [byte 13] status
+ * bit      0 : du:1
+ * bit      1 : eop:1
+ * bit      2 : error:1
+ * bit      3 : mi:1
+ * bit      4 : l3_identified:1
+ * bit      5 : l4_identified:1
+ * bit      6 : phy_info_included:1
+ * bit      7 : reserved:1
+ *  [word 7] length
+ *
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT     BIT(10)
+
+#define RX_DMA_STATUS_DU         BIT(0)
+#define RX_DMA_STATUS_ERROR      BIT(2)
+#define RX_DMA_STATUS_PHY_INFO   BIT(6)
+
+struct vring_rx_dma {
+       u32 d0;
+       u32 addr_low;
+       u16 addr_high;
+       u8  ip_length;
+       u8  b11;
+       u8  error;
+       u8  status;
+       u16 length;
+} __packed;
+
+struct vring_tx_desc {
+       struct vring_tx_mac mac;
+       struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+       struct vring_rx_mac mac;
+       struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+       struct vring_tx_desc tx;
+       struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644 (file)
index 0000000..9bcfffa
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "dbg_hexdump.h"
+
+#define WIL_NAME "wil6210"
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * it should be @b0 <= @b1, or result is incorrect
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+       return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL6210_TX_QUEUES (4)
+
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24)
+
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File      | Host addr    |  FW addr
+ *               |              |
+ * user_rgf      | 0x000000     | 0x880000
+ *  dma_rgf      | 0x001000     | 0x881000
+ * pcie_rgf      | 0x002000     | 0x882000
+ *               |              |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF      (0x880000UL)
+
+#define HOSTADDR(fwaddr)        (fwaddr - WIL6210_FW_HOST_OFF)
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+       u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+       u32 ICR; /* Cause, W1C/COR depending on ICC */
+       u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+       u32 ICS; /* Cause Set, WO */
+       u32 IMV; /* Mask, RW+S/C */
+       u32 IMS; /* Mask Set, write 1 to set */
+       u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USER_SCRATCH_PAD      (0x8802bc)
+#define RGF_USER_USER_ICR              (0x880b4c) /* struct RGF_ICR */
+       #define BIT_USER_USER_ICR_SW_INT_2      BIT(18)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0        (0x880b14)
+#define RGF_USER_MAC_CPU_0             (0x8801fc)
+#define RGF_USER_USER_CPU_0            (0x8801e0)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+
+#define RGF_DMA_PSEUDO_CAUSE           (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW   (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW   (0x881c70)
+       #define BIT_DMA_PSEUDO_CAUSE_RX         BIT(0)
+       #define BIT_DMA_PSEUDO_CAUSE_TX         BIT(1)
+       #define BIT_DMA_PSEUDO_CAUSE_MISC       BIT(2)
+
+#define RGF_DMA_EP_TX_ICR              (0x881bb4) /* struct RGF_ICR */
+       #define BIT_DMA_EP_TX_ICR_TX_DONE       BIT(0)
+       #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n)  BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR              (0x881bd0) /* struct RGF_ICR */
+       #define BIT_DMA_EP_RX_ICR_RX_DONE       BIT(0)
+#define RGF_DMA_EP_MISC_ICR            (0x881bec) /* struct RGF_ICR */
+       #define BIT_DMA_EP_MISC_ICR_RX_HTRSH    BIT(0)
+       #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT   BIT(1)
+       #define BIT_DMA_EP_MISC_ICR_FW_INT0     BIT(28)
+       #define BIT_DMA_EP_MISC_ICR_FW_INT1     BIT(29)
+
+/* Interrupt moderation control */
+#define RGF_DMA_ITR_CNT_TRSH           (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA           (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL            (0x881C64)
+       #define BIT_DMA_ITR_CNT_CRL_EN          BIT(0)
+       #define BIT_DMA_ITR_CNT_CRL_EXT_TICK    BIT(1)
+       #define BIT_DMA_ITR_CNT_CRL_FOREVER     BIT(2)
+       #define BIT_DMA_ITR_CNT_CRL_CLR         BIT(3)
+       #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH  BIT(4)
+
+/* popular locations */
+#define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+       offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1
+
+/* Hardware definitions end */
+
+struct wil6210_mbox_ring {
+       u32 base;
+       u16 entry_size; /* max. size of mbox entry, incl. all headers */
+       u16 size;
+       u32 tail;
+       u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+       __le32 sync;
+       __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+       struct wil6210_mbox_ring tx;
+       struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+       __le16 seq;
+       __le16 len; /* payload, bytes after this header */
+       __le16 type;
+       u8 flags;
+       u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE   (240)
+
+struct wil6210_mbox_hdr_wmi {
+       u8 reserved0[2];
+       __le16 id;
+       __le16 info1; /* bits [0..3] - device_id, rest - unused */
+       u8 reserved1[2];
+} __packed;
+
+struct pending_wmi_event {
+       struct list_head list;
+       struct {
+               struct wil6210_mbox_hdr hdr;
+               struct wil6210_mbox_hdr_wmi wmi;
+               u8 data[0];
+       } __packed event;
+};
+
+union vring_desc;
+
+struct vring {
+       dma_addr_t pa;
+       volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+       u16 size; /* number of vring_desc elements */
+       u32 swtail;
+       u32 swhead;
+       u32 hwtail; /* write here to inform hw */
+       void **ctx; /* void *ctx[size] - software context */
+};
+
+enum { /* for wil6210_priv.status */
+       wil_status_fwready = 0,
+       wil_status_fwconnected,
+       wil_status_dontscan,
+       wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+};
+
+struct pci_dev;
+
+struct wil6210_stats {
+       u64 tsf;
+       u32 snr;
+       u16 last_mcs_rx;
+       u16 bf_mcs; /* last BF, used for Tx */
+       u16 my_rx_sector;
+       u16 my_tx_sector;
+       u16 peer_rx_sector;
+       u16 peer_tx_sector;
+};
+
+struct wil6210_priv {
+       struct pci_dev *pdev;
+       int n_msi;
+       struct wireless_dev *wdev;
+       void __iomem *csr;
+       ulong status;
+       /* profile */
+       u32 monitor_flags;
+       u32 secure_pcp; /* create secure PCP? */
+       int sinfo_gen;
+       /* cached ISR registers */
+       u32 isr_misc;
+       /* mailbox related */
+       struct mutex wmi_mutex;
+       struct wil6210_mbox_ctl mbox_ctl;
+       struct completion wmi_ready;
+       u16 wmi_seq;
+       u16 reply_id; /**< wait for this WMI event */
+       void *reply_buf;
+       u16 reply_size;
+       struct workqueue_struct *wmi_wq; /* for deferred calls */
+       struct work_struct wmi_event_worker;
+       struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+       struct work_struct wmi_connect_worker;
+       struct work_struct disconnect_worker;
+       struct timer_list connect_timer;
+       int pending_connect_cid;
+       struct list_head pending_wmi_ev;
+       /*
+        * protect pending_wmi_ev
+        * - fill in IRQ from wil6210_irq_misc,
+        * - consumed in thread by wmi_event_worker
+        */
+       spinlock_t wmi_ev_lock;
+       /* DMA related */
+       struct vring vring_rx;
+       struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+       u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+       /* scan */
+       struct cfg80211_scan_request *scan_request;
+
+       struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+       /* statistics */
+       struct wil6210_stats stats;
+       /* debugfs */
+       struct dentry *debug;
+       struct debugfs_blob_wrapper fw_code_blob;
+       struct debugfs_blob_wrapper fw_data_blob;
+       struct debugfs_blob_wrapper fw_peri_blob;
+       struct debugfs_blob_wrapper uc_code_blob;
+       struct debugfs_blob_wrapper uc_data_blob;
+       struct debugfs_blob_wrapper rgf_blob;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+
+#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
+#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
+#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+
+#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+
+#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize,    \
+                         groupsize, buf, len, ascii)           \
+                         wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+                                        prefix_type, rowsize,  \
+                                        groupsize, buf, len, ascii)
+
+#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize,     \
+                        groupsize, buf, len, ascii)            \
+                        wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+                                       prefix_type, rowsize,   \
+                                       groupsize, buf, len, ascii)
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+                         size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+                       size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil);
+void wil_link_on(struct wil6210_priv *wil);
+void wil_link_off(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+                struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+            u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_connect_worker(struct work_struct *work);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil6210_disable_irq(struct wil6210_priv *wil);
+void wil6210_enable_irq(struct wil6210_priv *wil);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+
+int wil_rx_init(struct wil6210_priv *wil);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+                     int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644 (file)
index 0000000..12915f6
--- /dev/null
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When firmware about to report WMI event, it fills memory area
+ * in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for
+ * the misc IRQ, function @wmi_recv_cmd called by thread IRQ handler.
+ *
+ * @wmi_recv_cmd reads event, allocates memory chunk  and attaches it to the
+ * event list @wil->pending_wmi_ev. Then, work queue @wil->wmi_wq wakes up
+ * and handles events within the @wmi_event_worker. Every event get detached
+ * from list, processed and deleted.
+ *
+ * Purpose for this mechanism is to release IRQ thread; otherwise,
+ * if WMI event handling involves another WMI command flow, this 2-nd flow
+ * won't be completed because of blocked IRQ thread.
+ */
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * Same memory areas are visible at different address on
+ * the different busses. There are 3 main bus masters:
+ *  - MAC CPU (ucode)
+ *  - User CPU (firmware)
+ *  - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000
+ *
+ * Internally, firmware uses addresses that allows faster access but
+ * are invisible from the host. To read from these addresses, alternative
+ * AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address         PCI/Host address
+ *                        0x880000 .. 0xa80000  2Mb BAR0
+ * 0x800000 .. 0x807000   0x900000 .. 0x907000  28k DCCM
+ * 0x840000 .. 0x857000   0x908000 .. 0x91f000  92k PERIPH
+ */
+
+/**
+ * @fw_mapping provides memory remapping table
+ */
+static const struct {
+       u32 from; /* linker address - from, inclusive */
+       u32 to;   /* linker address - to, exclusive */
+       u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+} fw_mapping[] = {
+       {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
+       {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
+       {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
+       {0x880000, 0x88a000, 0x880000}, /* various RGF */
+       {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+       /*
+        * 920000..930000 ucode code RAM
+        * 930000..932000 ucode data RAM
+        */
+};
+
+/**
+ * return AHB address for given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If address have no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+       uint i;
+
+       for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+               if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+                       return x + fw_mapping[i].host - fw_mapping[i].from;
+       }
+
+       return 0;
+}
+
+/**
+ * Check address validity for WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * Valid buffer should be DWORD aligned
+ *
+ * return address for accessing buffer from the host;
+ * if buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+       u32 off;
+       u32 ptr = le32_to_cpu(ptr_);
+
+       if (ptr % 4)
+               return NULL;
+
+       ptr = wmi_addr_remap(ptr);
+       if (ptr < WIL6210_FW_HOST_OFF)
+               return NULL;
+
+       off = HOSTADDR(ptr);
+       if (off > WIL6210_MEM_SIZE - 4)
+               return NULL;
+
+       return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+       u32 off;
+
+       if (ptr % 4)
+               return NULL;
+
+       if (ptr < WIL6210_FW_HOST_OFF)
+               return NULL;
+
+       off = HOSTADDR(ptr);
+       if (off > WIL6210_MEM_SIZE - 4)
+               return NULL;
+
+       return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+                struct wil6210_mbox_hdr *hdr)
+{
+       void __iomem *src = wmi_buffer(wil, ptr);
+       if (!src)
+               return -EINVAL;
+
+       wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+       return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+       struct {
+               struct wil6210_mbox_hdr hdr;
+               struct wil6210_mbox_hdr_wmi wmi;
+       } __packed cmd = {
+               .hdr = {
+                       .type = WIL_MBOX_HDR_TYPE_WMI,
+                       .flags = 0,
+                       .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+               },
+               .wmi = {
+                       .id = cpu_to_le16(cmdid),
+                       .info1 = 0,
+               },
+       };
+       struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+       struct wil6210_mbox_ring_desc d_head;
+       u32 next_head;
+       void __iomem *dst;
+       void __iomem *head = wmi_addr(wil, r->head);
+       uint retry;
+
+       if (sizeof(cmd) + len > r->entry_size) {
+               wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+                       (int)(sizeof(cmd) + len), r->entry_size);
+               return -ERANGE;
+
+       }
+
+       might_sleep();
+
+       if (!test_bit(wil_status_fwready, &wil->status)) {
+               wil_err(wil, "FW not ready\n");
+               return -EAGAIN;
+       }
+
+       if (!head) {
+               wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+               return -EINVAL;
+       }
+       /* read Tx head till it is not busy */
+       for (retry = 5; retry > 0; retry--) {
+               wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+               if (d_head.sync == 0)
+                       break;
+               msleep(20);
+       }
+       if (d_head.sync != 0) {
+               wil_err(wil, "WMI head busy\n");
+               return -EBUSY;
+       }
+       /* next head */
+       next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+       wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+       /* wait till FW finish with previous command */
+       for (retry = 5; retry > 0; retry--) {
+               r->tail = ioread32(wil->csr + HOST_MBOX +
+                                  offsetof(struct wil6210_mbox_ctl, tx.tail));
+               if (next_head != r->tail)
+                       break;
+               msleep(20);
+       }
+       if (next_head == r->tail) {
+               wil_err(wil, "WMI ring full\n");
+               return -EBUSY;
+       }
+       dst = wmi_buffer(wil, d_head.addr);
+       if (!dst) {
+               wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+                       le32_to_cpu(d_head.addr));
+               return -EINVAL;
+       }
+       cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+       /* set command */
+       wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+       wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+                        sizeof(cmd), true);
+       wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+                        len, true);
+       wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+       wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+       /* mark entry as full */
+       iowrite32(1, wil->csr + HOSTADDR(r->head) +
+                 offsetof(struct wil6210_mbox_ring_desc, sync));
+       /* advance next ptr */
+       iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+                 offsetof(struct wil6210_mbox_ctl, tx.head));
+
+       /* interrupt to FW */
+       iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+       return 0;
+}
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+       int rc;
+
+       mutex_lock(&wil->wmi_mutex);
+       rc = __wmi_send(wil, cmdid, buf, len);
+       mutex_unlock(&wil->wmi_mutex);
+
+       return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct wmi_ready_event *evt = d;
+       u32 ver = le32_to_cpu(evt->sw_version);
+
+       wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
+               memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
+               memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
+       }
+       snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+                "%d", ver);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+                            int len)
+{
+       wil_dbg_WMI(wil, "WMI: FW ready\n");
+
+       set_bit(wil_status_fwready, &wil->status);
+       /* reuse wmi_ready for the firmware ready indication */
+       complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_rx_mgmt_packet_event *data = d;
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+       struct ieee80211_mgmt *rx_mgmt_frame =
+                       (struct ieee80211_mgmt *)data->payload;
+       int ch_no = data->info.channel+1;
+       u32 freq = ieee80211_channel_to_frequency(ch_no,
+                       IEEE80211_BAND_60GHZ);
+       struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+       /* TODO convert LE to CPU */
+       s32 signal = 0; /* TODO */
+       __le16 fc = rx_mgmt_frame->frame_control;
+       u32 d_len = le32_to_cpu(data->info.len);
+       u16 d_status = le16_to_cpu(data->info.status);
+
+       wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n",
+                   data->info.channel, data->info.mcs, data->info.snr);
+       wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+                   le16_to_cpu(data->info.stype));
+       wil_dbg_WMI(wil, "qid %d mid %d cid %d\n",
+                   data->info.qid, data->info.mid, data->info.cid);
+
+       if (!channel) {
+               wil_err(wil, "Frame on unsupported channel\n");
+               return;
+       }
+
+       if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+               struct cfg80211_bss *bss;
+               u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+               u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+               u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+               const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+               size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+                                                u.beacon.variable);
+               wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap);
+
+               bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
+                                         tsf, cap, bi, ie_buf, ie_len,
+                                         signal, GFP_KERNEL);
+               if (bss) {
+                       wil_dbg_WMI(wil, "Added BSS %pM\n",
+                                   rx_mgmt_frame->bssid);
+                       cfg80211_put_bss(bss);
+               } else {
+                       wil_err(wil, "cfg80211_inform_bss() failed\n");
+               }
+       }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+                                 void *d, int len)
+{
+       if (wil->scan_request) {
+               struct wmi_scan_complete_event *data = d;
+               bool aborted = (data->status != 0);
+
+               wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+               cfg80211_scan_done(wil->scan_request, aborted);
+               wil->scan_request = NULL;
+       } else {
+               wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+       }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct wmi_connect_event *evt = d;
+       int ch; /* channel number */
+       struct station_info sinfo;
+       u8 *assoc_req_ie, *assoc_resp_ie;
+       size_t assoc_req_ielen, assoc_resp_ielen;
+       /* capinfo(u16) + listen_interval(u16) + IEs */
+       const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+       /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+       const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+       if (len < sizeof(*evt)) {
+               wil_err(wil, "Connect event too short : %d bytes\n", len);
+               return;
+       }
+       if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+                  evt->assoc_resp_len) {
+               wil_err(wil,
+                       "Connect event corrupted : %d != %d + %d + %d + %d\n",
+                       len, (int)sizeof(*evt), evt->beacon_ie_len,
+                       evt->assoc_req_len, evt->assoc_resp_len);
+               return;
+       }
+       ch = evt->channel + 1;
+       wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n",
+                   evt->bssid, ch, evt->cid);
+       wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+                        evt->assoc_info, len - sizeof(*evt), true);
+
+       /* figure out IE's */
+       assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+                                       assoc_req_ie_offset];
+       assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+       if (evt->assoc_req_len <= assoc_req_ie_offset) {
+               assoc_req_ie = NULL;
+               assoc_req_ielen = 0;
+       }
+
+       assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+                                        evt->assoc_req_len +
+                                        assoc_resp_ie_offset];
+       assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+       if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+               assoc_resp_ie = NULL;
+               assoc_resp_ielen = 0;
+       }
+
+       if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+           (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+               if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+                       wil_err(wil, "Not in connecting state\n");
+                       return;
+               }
+               del_timer_sync(&wil->connect_timer);
+               cfg80211_connect_result(ndev, evt->bssid,
+                                       assoc_req_ie, assoc_req_ielen,
+                                       assoc_resp_ie, assoc_resp_ielen,
+                                       WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+       } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+                  (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+               memset(&sinfo, 0, sizeof(sinfo));
+
+               sinfo.generation = wil->sinfo_gen++;
+
+               if (assoc_req_ie) {
+                       sinfo.assoc_req_ies = assoc_req_ie;
+                       sinfo.assoc_req_ies_len = assoc_req_ielen;
+                       sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+               }
+
+               cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+       }
+       set_bit(wil_status_fwconnected, &wil->status);
+
+       /* FIXME FW can transmit only ucast frames to peer */
+       /* FIXME real ring_id instead of hard coded 0 */
+       memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+
+       wil->pending_connect_cid = evt->cid;
+       queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+                              void *d, int len)
+{
+       struct wmi_disconnect_event *evt = d;
+
+       wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n",
+                   evt->bssid,
+                   evt->protocol_reason_status, evt->disconnect_reason);
+
+       wil->sinfo_gen++;
+
+       wil6210_disconnect(wil, evt->bssid);
+       clear_bit(wil_status_dontscan, &wil->status);
+}
+
+static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_notify_req_done_event *evt = d;
+
+       if (len < sizeof(*evt)) {
+               wil_err(wil, "Short NOTIFY event\n");
+               return;
+       }
+
+       wil->stats.tsf = le64_to_cpu(evt->tsf);
+       wil->stats.snr = le32_to_cpu(evt->snr_val);
+       wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
+       wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
+       wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
+       wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
+       wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
+       wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n"
+                   "BF status 0x%08x SNR 0x%08x\n"
+                   "Tx Tpt %d goodput %d Rx goodput %d\n"
+                   "Sectors(rx:tx) my %d:%d peer %d:%d\n",
+                   wil->stats.bf_mcs, wil->stats.tsf, evt->status,
+                   wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+                   le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
+                   wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+                   wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+}
+
+/*
+ * Firmware reports EAPOL frame using WME event.
+ * Reconstruct Ethernet frame and deliver it via normal Rx
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+                            void *d, int len)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wmi_eapol_rx_event *evt = d;
+       u16 eapol_len = le16_to_cpu(evt->eapol_len);
+       int sz = eapol_len + ETH_HLEN;
+       struct sk_buff *skb;
+       struct ethhdr *eth;
+
+       wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len,
+                   evt->src_mac);
+
+       if (eapol_len > 196) { /* TODO: revisit size limit */
+               wil_err(wil, "EAPOL too large\n");
+               return;
+       }
+
+       skb = alloc_skb(sz, GFP_KERNEL);
+       if (!skb) {
+               wil_err(wil, "Failed to allocate skb\n");
+               return;
+       }
+       eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+       memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
+       memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+       eth->h_proto = cpu_to_be16(ETH_P_PAE);
+       memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+       skb->protocol = eth_type_trans(skb, ndev);
+       if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += skb->len;
+       } else {
+               ndev->stats.rx_dropped++;
+       }
+}
+
+static const struct {
+       int eventid;
+       void (*handler)(struct wil6210_priv *wil, int eventid,
+                       void *data, int data_len);
+} wmi_evt_handlers[] = {
+       {WMI_READY_EVENTID,             wmi_evt_ready},
+       {WMI_FW_READY_EVENTID,          wmi_evt_fw_ready},
+       {WMI_RX_MGMT_PACKET_EVENTID,    wmi_evt_rx_mgmt},
+       {WMI_SCAN_COMPLETE_EVENTID,     wmi_evt_scan_complete},
+       {WMI_CONNECT_EVENTID,           wmi_evt_connect},
+       {WMI_DISCONNECT_EVENTID,        wmi_evt_disconnect},
+       {WMI_NOTIFY_REQ_DONE_EVENTID,   wmi_evt_notify},
+       {WMI_EAPOL_RX_EVENTID,          wmi_evt_eapol_rx},
+};
+
+/*
+ * Run in IRQ context
+ * Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev
+ * that will be eventually handled by the @wmi_event_worker in the thread
+ * context of thread "wil6210_wmi"
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+       struct wil6210_mbox_ring_desc d_tail;
+       struct wil6210_mbox_hdr hdr;
+       struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+       struct pending_wmi_event *evt;
+       u8 *cmd;
+       void __iomem *src;
+       ulong flags;
+
+       for (;;) {
+               u16 len;
+
+               r->head = ioread32(wil->csr + HOST_MBOX +
+                                  offsetof(struct wil6210_mbox_ctl, rx.head));
+               if (r->tail == r->head)
+                       return;
+
+               /* read cmd from tail */
+               wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+                                    sizeof(struct wil6210_mbox_ring_desc));
+               if (d_tail.sync == 0) {
+                       wil_err(wil, "Mbox evt not owned by FW?\n");
+                       return;
+               }
+
+               if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+                       wil_err(wil, "Mbox evt at 0x%08x?\n",
+                               le32_to_cpu(d_tail.addr));
+                       return;
+               }
+
+               len = le16_to_cpu(hdr.len);
+               src = wmi_buffer(wil, d_tail.addr) +
+                     sizeof(struct wil6210_mbox_hdr);
+               evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+                                            event.wmi) + len, 4),
+                             GFP_KERNEL);
+               if (!evt) {
+                       wil_err(wil, "kmalloc for WMI event (%d) failed\n",
+                               len);
+                       return;
+               }
+               evt->event.hdr = hdr;
+               cmd = (void *)&evt->event.wmi;
+               wil_memcpy_fromio_32(cmd, src, len);
+               /* mark entry as empty */
+               iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+                         offsetof(struct wil6210_mbox_ring_desc, sync));
+               /* indicate */
+               wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n",
+                           le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+                           hdr.flags);
+               if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+                   (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+                       wil_dbg_WMI(wil, "WMI event 0x%04x\n",
+                                   evt->event.wmi.id);
+               }
+               wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+                                &evt->event.hdr, sizeof(hdr) + len, true);
+
+               /* advance tail */
+               r->tail = r->base + ((r->tail - r->base +
+                         sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+               iowrite32(r->tail, wil->csr + HOST_MBOX +
+                         offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+               /* add to the pending list */
+               spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+               list_add_tail(&evt->list, &wil->pending_wmi_ev);
+               spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+               {
+                       int q = queue_work(wil->wmi_wq,
+                                          &wil->wmi_event_worker);
+                       wil_dbg_WMI(wil, "queue_work -> %d\n", q);
+               }
+       }
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+            u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+       int rc;
+       int remain;
+
+       mutex_lock(&wil->wmi_mutex);
+
+       rc = __wmi_send(wil, cmdid, buf, len);
+       if (rc)
+               goto out;
+
+       wil->reply_id = reply_id;
+       wil->reply_buf = reply;
+       wil->reply_size = reply_size;
+       remain = wait_for_completion_timeout(&wil->wmi_ready,
+                       msecs_to_jiffies(to_msec));
+       if (0 == remain) {
+               wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+                       cmdid, reply_id, to_msec);
+               rc = -ETIME;
+       } else {
+               wil_dbg_WMI(wil,
+                           "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+                           cmdid, reply_id,
+                           to_msec - jiffies_to_msecs(remain));
+       }
+       wil->reply_id = 0;
+       wil->reply_buf = NULL;
+       wil->reply_size = 0;
+ out:
+       mutex_unlock(&wil->wmi_mutex);
+
+       return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+       struct wmi_echo_cmd cmd = {
+               .value = cpu_to_le32(0x12345678),
+       };
+
+       return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+                        WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+       struct wmi_set_mac_address_cmd cmd;
+
+       memcpy(cmd.mac, addr, ETH_ALEN);
+
+       wil_dbg_WMI(wil, "Set MAC %pM\n", addr);
+
+       return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+{
+       struct wmi_bcon_ctrl_cmd cmd = {
+               .bcon_interval = cpu_to_le16(bi),
+               .network_type = wmi_nettype,
+               .disable_sec_offload = 1,
+       };
+
+       if (!wil->secure_pcp)
+               cmd.disable_sec = 1;
+
+       return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+       struct wmi_set_ssid_cmd cmd = {
+               .ssid_len = cpu_to_le32(ssid_len),
+       };
+
+       if (ssid_len > sizeof(cmd.ssid))
+               return -EINVAL;
+
+       memcpy(cmd.ssid, ssid, ssid_len);
+
+       return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+       int rc;
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_set_ssid_cmd cmd;
+       } __packed reply;
+       int len; /* reply.cmd.ssid_len in CPU order */
+
+       rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+                     &reply, sizeof(reply), 20);
+       if (rc)
+               return rc;
+
+       len = le32_to_cpu(reply.cmd.ssid_len);
+       if (len > sizeof(reply.cmd.ssid))
+               return -EINVAL;
+
+       *ssid_len = len;
+       memcpy(ssid, reply.cmd.ssid, len);
+
+       return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+       struct wmi_set_pcp_channel_cmd cmd = {
+               .channel = channel - 1,
+       };
+
+       return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+       int rc;
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_set_pcp_channel_cmd cmd;
+       } __packed reply;
+
+       rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+                     WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+       if (rc)
+               return rc;
+
+       if (reply.cmd.channel > 3)
+               return -EINVAL;
+
+       *channel = reply.cmd.channel + 1;
+
+       return 0;
+}
+
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+       struct wmi_eapol_tx_cmd *cmd;
+       struct ethhdr *eth;
+       u16 eapol_len = skb->len - ETH_HLEN;
+       void *eapol = skb->data + ETH_HLEN;
+       uint i;
+       int rc;
+
+       skb_set_mac_header(skb, 0);
+       eth = eth_hdr(skb);
+       wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+               if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
+                       goto found_dest;
+       }
+
+       return -EINVAL;
+
+ found_dest:
+       /* find out eapol data & len */
+       cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
+       if (!cmd)
+               return -EINVAL;
+
+       memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
+       cmd->eapol_len = cpu_to_le16(eapol_len);
+       memcpy(cmd->eapol, eapol, eapol_len);
+       rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
+       kfree(cmd);
+
+       return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr)
+{
+       struct wmi_delete_cipher_key_cmd cmd = {
+               .key_index = key_index,
+       };
+
+       if (mac_addr)
+               memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+       return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr, int key_len, const void *key)
+{
+       struct wmi_add_cipher_key_cmd cmd = {
+               .key_index = key_index,
+               .key_usage = WMI_KEY_USE_PAIRWISE,
+               .key_len = key_len,
+       };
+
+       if (!key || (key_len > sizeof(cmd.key)))
+               return -EINVAL;
+
+       memcpy(cmd.key, key, key_len);
+       if (mac_addr)
+               memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+       return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+       int rc;
+       u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+       struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+       if (!cmd) {
+               wil_err(wil, "kmalloc(%d) failed\n", len);
+               return -ENOMEM;
+       }
+
+       cmd->mgmt_frm_type = type;
+       /* BUG: FW API define ieLen as u8. Will fix FW */
+       cmd->ie_len = cpu_to_le16(ie_len);
+       memcpy(cmd->ie_info, ie, ie_len);
+       rc = wmi_send(wil, WMI_SET_APPIE_CMDID, &cmd, len);
+       kfree(cmd);
+
+       return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+       struct pending_wmi_event *evt, *t;
+
+       wil_dbg_WMI(wil, "%s()\n", __func__);
+
+       list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+               list_del(&evt->list);
+               kfree(evt);
+       }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+                                void *d, int len)
+{
+       uint i;
+
+       for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+               if (wmi_evt_handlers[i].eventid == id) {
+                       wmi_evt_handlers[i].handler(wil, id, d, len);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+                            struct wil6210_mbox_hdr *hdr)
+{
+       u16 len = le16_to_cpu(hdr->len);
+
+       if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+           (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+               struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+               void *evt_data = (void *)(&wmi[1]);
+               u16 id = le16_to_cpu(wmi->id);
+               /* check if someone waits for this event */
+               if (wil->reply_id && wil->reply_id == id) {
+                       if (wil->reply_buf) {
+                               memcpy(wil->reply_buf, wmi,
+                                      min(len, wil->reply_size));
+                       } else {
+                               wmi_evt_call_handler(wil, id, evt_data,
+                                                    len - sizeof(*wmi));
+                       }
+                       wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id);
+                       complete(&wil->wmi_ready);
+                       return;
+               }
+               /* unsolicited event */
+               /* search for handler */
+               if (!wmi_evt_call_handler(wil, id, evt_data,
+                                         len - sizeof(*wmi))) {
+                       wil_err(wil, "Unhandled event 0x%04x\n", id);
+               }
+       } else {
+               wil_err(wil, "Unknown event type\n");
+               print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+                              hdr, sizeof(*hdr) + len, true);
+       }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+       ulong flags;
+       struct list_head *ret = NULL;
+
+       spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+       if (!list_empty(&wil->pending_wmi_ev)) {
+               ret = wil->pending_wmi_ev.next;
+               list_del(ret);
+       }
+
+       spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+       return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+       struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+                                                wmi_event_worker);
+       struct pending_wmi_event *evt;
+       struct list_head *lh;
+
+       while ((lh = next_wmi_ev(wil)) != NULL) {
+               evt = list_entry(lh, struct pending_wmi_event, list);
+               wmi_event_handle(wil, &evt->event.hdr);
+               kfree(evt);
+       }
+}
+
+void wmi_connect_worker(struct work_struct *work)
+{
+       int rc;
+       struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+                                               wmi_connect_worker);
+
+       if (wil->pending_connect_cid < 0) {
+               wil_err(wil, "No connection pending\n");
+               return;
+       }
+
+       wil_dbg_WMI(wil, "Configure for connection CID %d\n",
+                   wil->pending_connect_cid);
+
+       rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
+                              wil->pending_connect_cid, 0);
+       wil->pending_connect_cid = -1;
+       if (rc == 0)
+               wil_link_on(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644 (file)
index 0000000..3bbf875
--- /dev/null
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity .
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI) for the Wilocity
+ * MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+
+#define WMI_MAC_LEN            (6)
+#define WMI_PROX_RANGE_NUM     (3)
+
+/* List of Commands */
+enum wmi_command_id {
+       WMI_CONNECT_CMDID               = 0x0001,
+       WMI_DISCONNECT_CMDID            = 0x0003,
+       WMI_START_SCAN_CMDID            = 0x0007,
+       WMI_SET_BSS_FILTER_CMDID        = 0x0009,
+       WMI_SET_PROBED_SSID_CMDID       = 0x000a,
+       WMI_SET_LISTEN_INT_CMDID        = 0x000b,
+       WMI_BCON_CTRL_CMDID             = 0x000f,
+       WMI_ADD_CIPHER_KEY_CMDID        = 0x0016,
+       WMI_DELETE_CIPHER_KEY_CMDID     = 0x0017,
+       WMI_SET_APPIE_CMDID             = 0x003f,
+       WMI_GET_APPIE_CMDID             = 0x0040,
+       WMI_SET_WSC_STATUS_CMDID        = 0x0041,
+       WMI_PXMT_RANGE_CFG_CMDID        = 0x0042,
+       WMI_PXMT_SNR2_RANGE_CFG_CMDID   = 0x0043,
+       WMI_FAST_MEM_ACC_MODE_CMDID     = 0x0300,
+       WMI_MEM_READ_CMDID              = 0x0800,
+       WMI_MEM_WR_CMDID                = 0x0801,
+       WMI_ECHO_CMDID                  = 0x0803,
+       WMI_DEEP_ECHO_CMDID             = 0x0804,
+       WMI_CONFIG_MAC_CMDID            = 0x0805,
+       WMI_CONFIG_PHY_DEBUG_CMDID      = 0x0806,
+       WMI_ADD_STATION_CMDID           = 0x0807,
+       WMI_ADD_DEBUG_TX_PCKT_CMDID     = 0x0808,
+       WMI_PHY_GET_STATISTICS_CMDID    = 0x0809,
+       WMI_FS_TUNE_CMDID               = 0x080a,
+       WMI_CORR_MEASURE_CMDID          = 0x080b,
+       WMI_TEMP_SENSE_CMDID            = 0x080e,
+       WMI_DC_CALIB_CMDID              = 0x080f,
+       WMI_SEND_TONE_CMDID             = 0x0810,
+       WMI_IQ_TX_CALIB_CMDID           = 0x0811,
+       WMI_IQ_RX_CALIB_CMDID           = 0x0812,
+       WMI_SET_UCODE_IDLE_CMDID        = 0x0813,
+       WMI_SET_WORK_MODE_CMDID         = 0x0815,
+       WMI_LO_LEAKAGE_CALIB_CMDID      = 0x0816,
+       WMI_MARLON_R_ACTIVATE_CMDID     = 0x0817,
+       WMI_MARLON_R_READ_CMDID         = 0x0818,
+       WMI_MARLON_R_WRITE_CMDID        = 0x0819,
+       WMI_MARLON_R_TXRX_SEL_CMDID     = 0x081a,
+       MAC_IO_STATIC_PARAMS_CMDID      = 0x081b,
+       MAC_IO_DYNAMIC_PARAMS_CMDID     = 0x081c,
+       WMI_SILENT_RSSI_CALIB_CMDID     = 0x081d,
+       WMI_CFG_RX_CHAIN_CMDID          = 0x0820,
+       WMI_VRING_CFG_CMDID             = 0x0821,
+       WMI_RX_ON_CMDID                 = 0x0822,
+       WMI_VRING_BA_EN_CMDID           = 0x0823,
+       WMI_VRING_BA_DIS_CMDID          = 0x0824,
+       WMI_RCP_ADDBA_RESP_CMDID        = 0x0825,
+       WMI_RCP_DELBA_CMDID             = 0x0826,
+       WMI_SET_SSID_CMDID              = 0x0827,
+       WMI_GET_SSID_CMDID              = 0x0828,
+       WMI_SET_PCP_CHANNEL_CMDID       = 0x0829,
+       WMI_GET_PCP_CHANNEL_CMDID       = 0x082a,
+       WMI_SW_TX_REQ_CMDID             = 0x082b,
+       WMI_RX_OFF_CMDID                = 0x082c,
+       WMI_READ_MAC_RXQ_CMDID          = 0x0830,
+       WMI_READ_MAC_TXQ_CMDID          = 0x0831,
+       WMI_WRITE_MAC_RXQ_CMDID         = 0x0832,
+       WMI_WRITE_MAC_TXQ_CMDID         = 0x0833,
+       WMI_WRITE_MAC_XQ_FIELD_CMDID    = 0x0834,
+       WMI_MLME_PUSH_CMDID             = 0x0835,
+       WMI_BEAMFORMING_MGMT_CMDID      = 0x0836,
+       WMI_BF_TXSS_MGMT_CMDID          = 0x0837,
+       WMI_BF_SM_MGMT_CMDID            = 0x0838,
+       WMI_BF_RXSS_MGMT_CMDID          = 0x0839,
+       WMI_SET_SECTORS_CMDID           = 0x0849,
+       WMI_MAINTAIN_PAUSE_CMDID        = 0x0850,
+       WMI_MAINTAIN_RESUME_CMDID       = 0x0851,
+       WMI_RS_MGMT_CMDID               = 0x0852,
+       WMI_RF_MGMT_CMDID               = 0x0853,
+       /* Performance monitoring commands */
+       WMI_BF_CTRL_CMDID               = 0x0862,
+       WMI_NOTIFY_REQ_CMDID            = 0x0863,
+       WMI_GET_STATUS_CMDID            = 0x0864,
+       WMI_UNIT_TEST_CMDID             = 0x0900,
+       WMI_HICCUP_CMDID                = 0x0901,
+       WMI_FLASH_READ_CMDID            = 0x0902,
+       WMI_FLASH_WRITE_CMDID           = 0x0903,
+       WMI_SECURITY_UNIT_TEST_CMDID    = 0x0904,
+
+       WMI_SET_MAC_ADDRESS_CMDID       = 0xf003,
+       WMI_ABORT_SCAN_CMDID            = 0xf007,
+       WMI_SET_PMK_CMDID               = 0xf028,
+
+       WMI_SET_PROMISCUOUS_MODE_CMDID  = 0xf041,
+       WMI_GET_PMK_CMDID               = 0xf048,
+       WMI_SET_PASSPHRASE_CMDID        = 0xf049,
+       WMI_SEND_ASSOC_RES_CMDID        = 0xf04a,
+       WMI_SET_ASSOC_REQ_RELAY_CMDID   = 0xf04b,
+       WMI_EAPOL_TX_CMDID              = 0xf04c,
+       WMI_MAC_ADDR_REQ_CMDID          = 0xf04d,
+       WMI_FW_VER_CMDID                = 0xf04e,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+       WMI_FRAME_BEACON        = 0,
+       WMI_FRAME_PROBE_REQ     = 1,
+       WMI_FRAME_PROBE_RESP    = 2,
+       WMI_FRAME_ASSOC_REQ     = 3,
+       WMI_FRAME_ASSOC_RESP    = 4,
+       WMI_NUM_MGMT_FRAME,
+};
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+       WMI_NETTYPE_INFRA               = 0x01,
+       WMI_NETTYPE_ADHOC               = 0x02,
+       WMI_NETTYPE_ADHOC_CREATOR       = 0x04,
+       WMI_NETTYPE_AP                  = 0x10,
+       WMI_NETTYPE_P2P                 = 0x20,
+       WMI_NETTYPE_WBE                 = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+       WMI_AUTH11_OPEN                 = 0x01,
+       WMI_AUTH11_SHARED               = 0x02,
+       WMI_AUTH11_LEAP                 = 0x04,
+       WMI_AUTH11_WSC                  = 0x08,
+};
+
+enum wmi_auth_mode {
+       WMI_AUTH_NONE                   = 0x01,
+       WMI_AUTH_WPA                    = 0x02,
+       WMI_AUTH_WPA2                   = 0x04,
+       WMI_AUTH_WPA_PSK                = 0x08,
+       WMI_AUTH_WPA2_PSK               = 0x10,
+       WMI_AUTH_WPA_CCKM               = 0x20,
+       WMI_AUTH_WPA2_CCKM              = 0x40,
+};
+
+enum wmi_crypto_type {
+       WMI_CRYPT_NONE                  = 0x01,
+       WMI_CRYPT_WEP                   = 0x02,
+       WMI_CRYPT_TKIP                  = 0x04,
+       WMI_CRYPT_AES                   = 0x08,
+       WMI_CRYPT_AES_GCMP              = 0x20,
+};
+
+
+enum wmi_connect_ctrl_flag_bits {
+       WMI_CONNECT_ASSOC_POLICY_USER           = 0x0001,
+       WMI_CONNECT_SEND_REASSOC                = 0x0002,
+       WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER    = 0x0004,
+       WMI_CONNECT_PROFILE_MATCH_DONE          = 0x0008,
+       WMI_CONNECT_IGNORE_AAC_BEACON           = 0x0010,
+       WMI_CONNECT_CSA_FOLLOW_BSS              = 0x0020,
+       WMI_CONNECT_DO_WPA_OFFLOAD              = 0x0040,
+       WMI_CONNECT_DO_NOT_DEAUTH               = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN    (32)
+
+struct wmi_connect_cmd {
+       u8 network_type;
+       u8 dot11_auth_mode;
+       u8 auth_mode;
+       u8 pairwise_crypto_type;
+       u8 pairwise_crypto_len;
+       u8 group_crypto_type;
+       u8 group_crypto_len;
+       u8 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+       u8 channel;
+       u8 reserved0;
+       u8 bssid[WMI_MAC_LEN];
+       __le32 ctrl_flags;
+       u8 dst_mac[WMI_MAC_LEN];
+       u8 reserved1[2];
+} __packed;
+
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+struct wmi_reconnect_cmd {
+       u8 channel;                     /* hint */
+       u8 reserved;
+       u8 bssid[WMI_MAC_LEN];          /* mandatory if set */
+} __packed;
+
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX      (0)
+#define WMI_MAX_KEY_INDEX      (3)
+#define WMI_MAX_KEY_LEN                (32)
+#define WMI_PASSPHRASE_LEN     (64)
+#define WMI_PMK_LEN            (32)
+
+struct  wmi_set_pmk_cmd {
+       u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+       u8 ssid[WMI_MAX_SSID_LEN];
+       u8 passphrase[WMI_PASSPHRASE_LEN];
+       u8 ssid_len;
+       u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+       WMI_KEY_USE_PAIRWISE    = 0,
+       WMI_KEY_USE_GROUP       = 1,
+       WMI_KEY_USE_TX          = 2,  /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+       u8 key_index;
+       u8 key_type;
+       u8 key_usage;           /* enum wmi_key_usage */
+       u8 key_len;
+       u8 key_rsc[8];          /* key replay sequence counter */
+       u8 key[WMI_MAX_KEY_LEN];
+       u8 key_op_ctrl;         /* Additional Key Control information */
+       u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+       u8 key_index;
+       u8 mac[WMI_MAC_LEN];
+} __packed;
+
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+       WMI_LONG_SCAN           = 0,
+       WMI_SHORT_SCAN          = 1,
+};
+
+struct wmi_start_scan_cmd {
+       u8 reserved[8];
+       __le32 home_dwell_time; /* Max duration in the home channel(ms) */
+       __le32 force_scan_interval;     /* Time interval between scans (ms)*/
+       u8 scan_type;           /* wmi_scan_type */
+       u8 num_channels;                /* how many channels follow */
+       struct {
+               u8 channel;
+               u8 reserved;
+       } channel_list[0];      /* channels ID's */
+                               /* 0 - 58320 MHz */
+                               /* 1 - 60480 MHz */
+                               /* 2 - 62640 MHz */
+} __packed;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX   (15)
+
+enum wmi_ssid_flag {
+       WMI_SSID_FLAG_DISABLE   = 0,    /* disables entry */
+       WMI_SSID_FLAG_SPECIFIC  = 1,    /* probes specified ssid */
+       WMI_SSID_FLAG_ANY       = 2,    /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+       u8 entry_index;                 /* 0 to MAX_PROBED_SSID_INDEX */
+       u8 flag;                        /* enum wmi_ssid_flag */
+       u8 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+struct wmi_set_appie_cmd {
+       u8 mgmt_frm_type;       /* enum wmi_mgmt_frame_type */
+       u8 reserved;
+       __le16 ie_len;  /* Length of the IE to be added to MGMT frame */
+       u8 ie_info[0];
+} __packed;
+
+#define WMI_MAX_IE_LEN (1024)
+
+struct wmi_pxmt_range_cfg_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 range;
+} __packed;
+
+struct wmi_pxmt_snr2_range_cfg_cmd {
+       s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+       WMI_RF_MGMT_W_DISABLE   = 0,
+       WMI_RF_MGMT_W_ENABLE    = 1,
+       WMI_RF_MGMT_GET_STATUS  = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+       __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+       __le32 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+       u8 channel;
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+       __le16 bcon_interval;
+       __le16 frag_num;
+       __le64 ss_mask;
+       u8 network_type;
+       u8 reserved;
+       u8 disable_sec_offload;
+       u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 len;
+       u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+       __le64 ring_mem_base;
+       __le16 ring_size;
+       __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+       __le16 priority;
+       __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+       WMI_VRING_ENC_TYPE_802_3                = 0,
+       WMI_VRING_ENC_TYPE_NATIVE_WIFI          = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+       WMI_VRING_DS_PBSS                       = 0,
+       WMI_VRING_DS_STATION                    = 1,
+       WMI_VRING_DS_AP                         = 2,
+       WMI_VRING_DS_ADDR4                      = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+       WMI_NWIFI_TX_TRANS_MODE_NO              = 0,
+       WMI_NWIFI_TX_TRANS_MODE_AP2PBSS         = 1,
+       WMI_NWIFI_TX_TRANS_MODE_STA2PBSS        = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+       WMI_SCH_PRIO_REGULAR                    = 0,
+       WMI_SCH_PRIO_HIGH                       = 1,
+};
+
+struct wmi_vring_cfg {
+       struct wmi_sw_ring_cfg tx_sw_ring;
+       u8 ringid;                              /* 0-23 vrings */
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 encap_trans_type;
+       u8 ds_cfg;                              /* 802.3 DS cfg */
+       u8 nwifi_ds_trans_type;
+
+       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+       #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+       #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+       #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+       u8 mac_ctrl;
+
+       #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+       #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+       #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+       u8 to_resolution;
+       u8 agg_max_wsize;
+       struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+enum wmi_vring_cfg_cmd_action {
+       WMI_VRING_CMD_ADD                       = 0,
+       WMI_VRING_CMD_MODIFY                    = 1,
+       WMI_VRING_CMD_DELETE                    = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+       __le32 action;
+       struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+       u8 ringid;
+       u8 agg_max_wsize;
+       __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+       u8 ringid;
+       u8 reserved;
+       __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+       u8 cid;
+       u8 reserved[3];
+       __le32 interval_usec;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+       WMI_SNIFFER_OFF                         = 0,
+       WMI_SNIFFER_ON                          = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+       WMI_SNIFFER_PHY_INFO_DISABLED           = 0,
+       WMI_SNIFFER_PHY_INFO_ENABLED            = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+       WMI_SNIFFER_CP                          = 0,
+       WMI_SNIFFER_DP                          = 1,
+       WMI_SNIFFER_BOTH_PHYS                   = 2,
+};
+
+struct wmi_sniffer_cfg {
+       __le32 mode;            /* enum wmi_sniffer_cfg_mode */
+       __le32 phy_info_mode;   /* enum wmi_sniffer_cfg_phy_info_mode */
+       __le32 phy_support;     /* enum wmi_sniffer_cfg_phy_support */
+       u8 channel;
+       u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+       WMI_RX_CHAIN_ADD                        = 0,
+       WMI_RX_CHAIN_DEL                        = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+       WMI_DECAP_TYPE_802_3                    = 0,
+       WMI_DECAP_TYPE_NATIVE_WIFI              = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+       WMI_NWIFI_RX_TRANS_MODE_NO              = 0,
+       WMI_NWIFI_RX_TRANS_MODE_PBSS2AP         = 1,
+       WMI_NWIFI_RX_TRANS_MODE_PBSS2STA        = 2,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+       __le32 action;
+       struct wmi_sw_ring_cfg rx_sw_ring;
+       u8 mid;
+       u8 decap_trans_type;
+
+       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+       u8 l2_802_3_offload_ctrl;
+
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+       u8 l2_nwifi_offload_ctrl;
+
+       u8 vlan_id;
+       u8 nwifi_ds_trans_type;
+
+       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+       u8 l3_l4_ctrl;
+
+       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+       #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+       #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+       #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+       #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+       #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+       #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+       u8 ring_ctrl;
+
+       __le16 prefetch_thrsh;
+       __le16 wb_thrsh;
+       __le32 itr_value;
+       __le16 host_thrsh;
+       u8 reserved[2];
+       struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 dialog_token;
+       __le16 status_code;
+       __le16 ba_param_set;    /* ieee80211_ba_parameterset field to send */
+       __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 reserved;
+       __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 dialog_token;
+       /* ieee80211_ba_parameterset field as it received */
+       __le16 ba_param_set;
+       __le16 ba_timeout;
+       /* ieee80211_ba_seqstrl field as it received */
+       __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+       u8 mac[WMI_MAC_LEN];
+       u8 reserved[2];
+} __packed;
+
+
+/*
+* WMI_EAPOL_TX_CMDID
+*/
+struct wmi_eapol_tx_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 eapol_len;
+       u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * same event for both commands
+ */
+struct wmi_echo_cmd {
+       __le32 value;
+} __packed;
+
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+       WMI_IMM_RSP_EVENTID                     = 0x0000,
+       WMI_READY_EVENTID                       = 0x1001,
+       WMI_CONNECT_EVENTID                     = 0x1002,
+       WMI_DISCONNECT_EVENTID                  = 0x1003,
+       WMI_SCAN_COMPLETE_EVENTID               = 0x100a,
+       WMI_REPORT_STATISTICS_EVENTID           = 0x100b,
+       WMI_RD_MEM_RSP_EVENTID                  = 0x1800,
+       WMI_FW_READY_EVENTID                    = 0x1801,
+       WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID      = 0x0200,
+       WMI_ECHO_RSP_EVENTID                    = 0x1803,
+       WMI_CONFIG_MAC_DONE_EVENTID             = 0x1805,
+       WMI_CONFIG_PHY_DEBUG_DONE_EVENTID       = 0x1806,
+       WMI_ADD_STATION_DONE_EVENTID            = 0x1807,
+       WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID      = 0x1808,
+       WMI_PHY_GET_STATISTICS_EVENTID          = 0x1809,
+       WMI_FS_TUNE_DONE_EVENTID                = 0x180a,
+       WMI_CORR_MEASURE_DONE_EVENTID           = 0x180b,
+       WMI_TEMP_SENSE_DONE_EVENTID             = 0x180e,
+       WMI_DC_CALIB_DONE_EVENTID               = 0x180f,
+       WMI_IQ_TX_CALIB_DONE_EVENTID            = 0x1811,
+       WMI_IQ_RX_CALIB_DONE_EVENTID            = 0x1812,
+       WMI_SET_WORK_MODE_DONE_EVENTID          = 0x1815,
+       WMI_LO_LEAKAGE_CALIB_DONE_EVENTID       = 0x1816,
+       WMI_MARLON_R_ACTIVATE_DONE_EVENTID      = 0x1817,
+       WMI_MARLON_R_READ_DONE_EVENTID          = 0x1818,
+       WMI_MARLON_R_WRITE_DONE_EVENTID         = 0x1819,
+       WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181a,
+       WMI_SILENT_RSSI_CALIB_DONE_EVENTID      = 0x181d,
+
+       WMI_CFG_RX_CHAIN_DONE_EVENTID           = 0x1820,
+       WMI_VRING_CFG_DONE_EVENTID              = 0x1821,
+       WMI_RX_ON_DONE_EVENTID                  = 0x1822,
+       WMI_BA_STATUS_EVENTID                   = 0x1823,
+       WMI_RCP_ADDBA_REQ_EVENTID               = 0x1824,
+       WMI_ADDBA_RESP_SENT_EVENTID             = 0x1825,
+       WMI_DELBA_EVENTID                       = 0x1826,
+       WMI_GET_SSID_EVENTID                    = 0x1828,
+       WMI_GET_PCP_CHANNEL_EVENTID             = 0x182a,
+       WMI_SW_TX_COMPLETE_EVENTID              = 0x182b,
+       WMI_RX_OFF_DONE_EVENTID                 = 0x182c,
+
+       WMI_READ_MAC_RXQ_EVENTID                = 0x1830,
+       WMI_READ_MAC_TXQ_EVENTID                = 0x1831,
+       WMI_WRITE_MAC_RXQ_EVENTID               = 0x1832,
+       WMI_WRITE_MAC_TXQ_EVENTID               = 0x1833,
+       WMI_WRITE_MAC_XQ_FIELD_EVENTID          = 0x1834,
+
+       WMI_BEAFORMING_MGMT_DONE_EVENTID        = 0x1836,
+       WMI_BF_TXSS_MGMT_DONE_EVENTID           = 0x1837,
+       WMI_BF_RXSS_MGMT_DONE_EVENTID           = 0x1839,
+       WMI_RS_MGMT_DONE_EVENTID                = 0x1852,
+       WMI_RF_MGMT_STATUS_EVENTID              = 0x1853,
+       WMI_BF_SM_MGMT_DONE_EVENTID             = 0x1838,
+       WMI_RX_MGMT_PACKET_EVENTID              = 0x1840,
+
+       /* Performance monitoring events */
+       WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
+       WMI_WBE_LINKDOWN_EVENTID                = 0x1861,
+
+       WMI_BF_CTRL_DONE_EVENTID                = 0x1862,
+       WMI_NOTIFY_REQ_DONE_EVENTID             = 0x1863,
+       WMI_GET_STATUS_DONE_EVENTID             = 0x1864,
+
+       WMI_UNIT_TEST_EVENTID                   = 0x1900,
+       WMI_FLASH_READ_DONE_EVENTID             = 0x1902,
+       WMI_FLASH_WRITE_DONE_EVENTID            = 0x1903,
+
+       WMI_SET_CHANNEL_EVENTID                 = 0x9000,
+       WMI_ASSOC_REQ_EVENTID                   = 0x9001,
+       WMI_EAPOL_RX_EVENTID                    = 0x9002,
+       WMI_MAC_ADDR_RESP_EVENTID               = 0x9003,
+       WMI_FW_VER_EVENTID                      = 0x9004,
+};
+
+/*
+ * Events data structures
+ */
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+       WMI_RF_ENABLED                  = 0,
+       WMI_RF_DISABLED_HW              = 1,
+       WMI_RF_DISABLED_SW              = 2,
+       WMI_RF_DISABLED_HW_SW           = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+       __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+       __le32 is_associated;
+       u8 cid;
+       u8 reserved0[3];
+       u8 bssid[WMI_MAC_LEN];
+       u8 channel;
+       u8 reserved1;
+       u8 network_type;
+       u8 reserved2[3];
+       __le32 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+       __le32 rf_status;
+       __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+       u8 major;
+       u8 minor;
+       __le16 subminor;
+       __le16 build;
+} __packed;
+
+/*
+* WMI_MAC_ADDR_RESP_EVENTID
+*/
+struct wmi_mac_addr_resp_event {
+       u8 mac[WMI_MAC_LEN];
+       u8 auth_mode;
+       u8 crypt_mode;
+       __le32 offload_mode;
+} __packed;
+
+/*
+* WMI_EAPOL_RX_EVENTID
+*/
+struct wmi_eapol_rx_event {
+       u8 src_mac[WMI_MAC_LEN];
+       __le16 eapol_len;
+       u8 eapol[0];
+} __packed;
+
+/*
+* WMI_READY_EVENTID
+*/
+enum wmi_phy_capability {
+       WMI_11A_CAPABILITY              = 1,
+       WMI_11G_CAPABILITY              = 2,
+       WMI_11AG_CAPABILITY             = 3,
+       WMI_11NA_CAPABILITY             = 4,
+       WMI_11NG_CAPABILITY             = 5,
+       WMI_11NAG_CAPABILITY            = 6,
+       WMI_11AD_CAPABILITY             = 7,
+       WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+       __le32 sw_version;
+       __le32 abi_version;
+       u8 mac[WMI_MAC_LEN];
+       u8 phy_capability;              /* enum wmi_phy_capability */
+       u8 reserved;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+       __le32 status;
+       __le64 tsf;
+       __le32 snr_val;
+       __le32 tx_tpt;
+       __le32 tx_goodput;
+       __le32 rx_goodput;
+       __le16 bf_mcs;
+       __le16 my_rx_sector;
+       __le16 my_tx_sector;
+       __le16 other_rx_sector;
+       __le16 other_tx_sector;
+       __le16 range;
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+       u8 channel;
+       u8 reserved0;
+       u8 bssid[WMI_MAC_LEN];
+       __le16 listen_interval;
+       __le16 beacon_interval;
+       u8 network_type;
+       u8 reserved1[3];
+       u8 beacon_ie_len;
+       u8 assoc_req_len;
+       u8 assoc_resp_len;
+       u8 cid;
+       u8 reserved2[3];
+       u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+       WMI_DIS_REASON_NO_NETWORK_AVAIL         = 1,
+       WMI_DIS_REASON_LOST_LINK                = 2, /* bmiss */
+       WMI_DIS_REASON_DISCONNECT_CMD           = 3,
+       WMI_DIS_REASON_BSS_DISCONNECTED         = 4,
+       WMI_DIS_REASON_AUTH_FAILED              = 5,
+       WMI_DIS_REASON_ASSOC_FAILED             = 6,
+       WMI_DIS_REASON_NO_RESOURCES_AVAIL       = 7,
+       WMI_DIS_REASON_CSERV_DISCONNECT         = 8,
+       WMI_DIS_REASON_INVALID_PROFILE          = 10,
+       WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH    = 11,
+       WMI_DIS_REASON_PROFILE_MISMATCH         = 12,
+       WMI_DIS_REASON_CONNECTION_EVICTED       = 13,
+       WMI_DIS_REASON_IBSS_MERGE               = 14,
+};
+
+struct wmi_disconnect_event {
+       __le16 protocol_reason_status;  /* reason code, see 802.11 spec. */
+       u8 bssid[WMI_MAC_LEN];          /* set if known */
+       u8 disconnect_reason;           /* see wmi_disconnect_reason_e */
+       u8 assoc_resp_len;
+       u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+struct wmi_scan_complete_event {
+       __le32 status;
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+       WMI_BA_AGREED                   = 0,
+       WMI_BA_NON_AGREED               = 1,
+};
+
+struct wmi_vring_ba_status_event {
+       __le16 status;
+       u8 reserved[2];
+       u8 ringid;
+       u8 agg_wsize;
+       __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 from_initiator;
+       __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+enum wmi_vring_cfg_done_event_status {
+       WMI_VRING_CFG_SUCCESS           = 0,
+       WMI_VRING_CFG_FAILURE           = 1,
+};
+
+struct wmi_vring_cfg_done_event {
+       u8 ringid;
+       u8 status;
+       u8 reserved[2];
+       __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+enum wmi_rcp_addba_resp_sent_event_status {
+       WMI_ADDBA_SUCCESS               = 0,
+       WMI_ADDBA_FAIL                  = 1,
+};
+
+struct wmi_rcp_addba_resp_sent_event {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 reserved;
+       __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 dialog_token;
+       __le16 ba_param_set;    /* ieee80211_ba_parameterset as it received */
+       __le16 ba_timeout;
+       __le16 ba_seq_ctrl;     /* ieee80211_ba_seqstrl field as it received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+       WMI_CFG_RX_CHAIN_SUCCESS        = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+       __le32 rx_ring_tail_ptr;        /* Rx V-Ring Tail pointer */
+       __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+       WMI_WBE_REASON_USER_REQUEST     = 0,
+       WMI_WBE_REASON_RX_DISASSOC      = 1,
+       WMI_WBE_REASON_BAD_PHY_LINK     = 2,
+};
+
+struct wmi_wbe_link_down_event {
+       u8 cid;
+       u8 reserved[3];
+       __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+       u8 cid;
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+       u8 channel;
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+       WMI_TX_SW_STATUS_SUCCESS                = 0,
+       WMI_TX_SW_STATUS_FAILED_NO_RESOURCES    = 1,
+       WMI_TX_SW_STATUS_FAILED_TX              = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+       u8 status;      /* enum wmi_sw_tx_status */
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+       __le32 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+       u8 mcs;
+       s8 snr;
+       __le16 range;
+       __le16 stype;
+       __le16 status;
+       __le32 len;
+       u8 qid;
+       u8 mid;
+       u8 cid;
+       u8 channel;     /* From Radio MNGR */
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+       struct wmi_rx_mgmt_info info;
+       u8 payload[0];
+} __packed;
+
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+       __le32 echoed_value;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */
index b298e5d..10e288d 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/hw_random.h>
 #include <linux/bcma/bcma.h>
 #include <linux/ssb/ssb.h>
+#include <linux/completion.h>
 #include <net/mac80211.h>
 
 #include "debugfs.h"
@@ -722,6 +723,10 @@ enum b43_firmware_file_type {
 struct b43_request_fw_context {
        /* The device we are requesting the fw for. */
        struct b43_wldev *dev;
+       /* a completion event structure needed if this call is asynchronous */
+       struct completion fw_load_complete;
+       /* a pointer to the firmware object */
+       const struct firmware *blob;
        /* The type of firmware to request. */
        enum b43_firmware_file_type req_type;
        /* Error messages for each firmware type. */
index 16ab280..806e34c 100644 (file)
@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
                b43warn(wl, text);
 }
 
+static void b43_fw_cb(const struct firmware *firmware, void *context)
+{
+       struct b43_request_fw_context *ctx = context;
+
+       ctx->blob = firmware;
+       complete(&ctx->fw_load_complete);
+}
+
 int b43_do_request_fw(struct b43_request_fw_context *ctx,
                      const char *name,
-                     struct b43_firmware_file *fw)
+                     struct b43_firmware_file *fw, bool async)
 {
-       const struct firmware *blob;
        struct b43_fw_header *hdr;
        u32 size;
        int err;
@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
                B43_WARN_ON(1);
                return -ENOSYS;
        }
-       err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
+       if (async) {
+               /* do this part asynchronously */
+               init_completion(&ctx->fw_load_complete);
+               err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+                                             ctx->dev->dev->dev, GFP_KERNEL,
+                                             ctx, b43_fw_cb);
+               if (err < 0) {
+                       pr_err("Unable to load firmware\n");
+                       return err;
+               }
+               /* stall here until fw ready */
+               wait_for_completion(&ctx->fw_load_complete);
+               if (ctx->blob)
+                       goto fw_ready;
+       /* On some ARM systems, the async request will fail, but the next sync
+        * request works. For this reason, we dall through here
+        */
+       }
+       err = request_firmware(&ctx->blob, ctx->fwname,
+                              ctx->dev->dev->dev);
        if (err == -ENOENT) {
                snprintf(ctx->errors[ctx->req_type],
                         sizeof(ctx->errors[ctx->req_type]),
-                        "Firmware file \"%s\" not found\n", ctx->fwname);
+                        "Firmware file \"%s\" not found\n",
+                        ctx->fwname);
                return err;
        } else if (err) {
                snprintf(ctx->errors[ctx->req_type],
@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
                         ctx->fwname, err);
                return err;
        }
-       if (blob->size < sizeof(struct b43_fw_header))
+fw_ready:
+       if (ctx->blob->size < sizeof(struct b43_fw_header))
                goto err_format;
-       hdr = (struct b43_fw_header *)(blob->data);
+       hdr = (struct b43_fw_header *)(ctx->blob->data);
        switch (hdr->type) {
        case B43_FW_TYPE_UCODE:
        case B43_FW_TYPE_PCM:
                size = be32_to_cpu(hdr->size);
-               if (size != blob->size - sizeof(struct b43_fw_header))
+               if (size != ctx->blob->size - sizeof(struct b43_fw_header))
                        goto err_format;
                /* fallthrough */
        case B43_FW_TYPE_IV:
@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
                goto err_format;
        }
 
-       fw->data = blob;
+       fw->data = ctx->blob;
        fw->filename = name;
        fw->type = ctx->req_type;
 
@@ -2172,7 +2200,7 @@ err_format:
        snprintf(ctx->errors[ctx->req_type],
                 sizeof(ctx->errors[ctx->req_type]),
                 "Firmware file \"%s\" format error.\n", ctx->fwname);
-       release_firmware(blob);
+       release_firmware(ctx->blob);
 
        return -EPROTO;
 }
@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
                        goto err_no_ucode;
                }
        }
-       err = b43_do_request_fw(ctx, filename, &fw->ucode);
+       err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
        if (err)
                goto err_load;
 
@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
        else
                goto err_no_pcm;
        fw->pcm_request_failed = false;
-       err = b43_do_request_fw(ctx, filename, &fw->pcm);
+       err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
        if (err == -ENOENT) {
                /* We did not find a PCM file? Not fatal, but
                 * core rev <= 10 must do without hwcrypto then. */
@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
        default:
                goto err_no_initvals;
        }
-       err = b43_do_request_fw(ctx, filename, &fw->initvals);
+       err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
        if (err)
                goto err_load;
 
@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
        default:
                goto err_no_initvals;
        }
-       err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
+       err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
        if (err)
                goto err_load;
 
index 8c684cd..abac25e 100644 (file)
@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
 
 
 struct b43_request_fw_context;
-int b43_do_request_fw(struct b43_request_fw_context *ctx,
-                     const char *name,
-                     struct b43_firmware_file *fw);
+int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
+                     struct b43_firmware_file *fw, bool async);
 void b43_do_release_fw(struct b43_firmware_file *fw);
 
 #endif /* B43_MAIN_H_ */
index d604b40..3726cd6 100644 (file)
@@ -3273,7 +3273,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
 
        if (count) {
                char *p = buffer;
-               strncpy(buffer, buf, min(sizeof(buffer), count));
+               strlcpy(buffer, buf, sizeof(buffer));
                channel = simple_strtoul(p, NULL, 0);
                if (channel)
                        params.channel = channel;
index da21328..a790599 100644 (file)
@@ -1151,13 +1151,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        next_reclaimed = ssn;
                }
 
-               if (tid != IWL_TID_NON_QOS) {
-                       priv->tid_data[sta_id][tid].next_reclaimed =
-                               next_reclaimed;
-                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
-                                                 next_reclaimed);
-               }
-
                iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
                iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1208,11 +1201,28 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        if (!is_agg)
                                iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
+                       /*
+                        * W/A for FW bug - the seq_ctl isn't updated when the
+                        * queues are flushed. Fetch it from the packet itself
+                        */
+                       if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+                               next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
+                               next_reclaimed =
+                                       SEQ_TO_SN(next_reclaimed + 0x10);
+                       }
+
                        is_offchannel_skb =
                                (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
                        freed++;
                }
 
+               if (tid != IWL_TID_NON_QOS) {
+                       priv->tid_data[sta_id][tid].next_reclaimed =
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                          next_reclaimed);
+               }
+
                WARN_ON(!is_agg && freed != 1);
 
                /*
index dad4c4a..8389cd3 100644 (file)
@@ -1166,6 +1166,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);
+       return IRQ_HANDLED;
 
 none:
        /* re-enable interrupts here since we don't have anything to service. */
index a875499..efe525b 100644 (file)
@@ -1709,7 +1709,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
                                                NL80211_CHAN_NO_HT)
                        config_bands |= BAND_GN;
        } else {
-               if (cfg80211_get_chandef_type(&params->chandef) !=
+               if (cfg80211_get_chandef_type(&params->chandef) ==
                                                NL80211_CHAN_NO_HT)
                        config_bands = BAND_A;
                else
index cb68256..60e88b5 100644 (file)
@@ -56,7 +56,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
  */
 int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
 {
-       bool cancel_flag = false;
        int status;
        struct cmd_ctrl_node *cmd_queued;
 
@@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
        atomic_inc(&adapter->cmd_pending);
 
        /* Wait for completion */
-       wait_event_interruptible(adapter->cmd_wait_q.wait,
-                                *(cmd_queued->condition));
-       if (!*(cmd_queued->condition))
-               cancel_flag = true;
-
-       if (cancel_flag) {
-               mwifiex_cancel_pending_ioctl(adapter);
-               dev_dbg(adapter->dev, "cmd cancel\n");
+       status = wait_event_interruptible(adapter->cmd_wait_q.wait,
+                                         *(cmd_queued->condition));
+       if (status) {
+               dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+               return status;
        }
 
        status = adapter->cmd_wait_q.status;
@@ -496,8 +492,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
                return false;
        }
 
-       wait_event_interruptible(adapter->hs_activate_wait_q,
-                                adapter->hs_activate_wait_q_woken);
+       if (wait_event_interruptible(adapter->hs_activate_wait_q,
+                                    adapter->hs_activate_wait_q_woken)) {
+               dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+               return false;
+       }
 
        return true;
 }
index f221b95..83564d3 100644 (file)
@@ -4250,9 +4250,11 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
        p->amsdu_enabled = 0;
 
        rc = mwl8k_post_cmd(hw, &cmd->header);
+       if (!rc)
+               rc = p->station_id;
        kfree(cmd);
 
-       return rc ? rc : p->station_id;
+       return rc;
 }
 
 static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
index 1d5d360..246e535 100644 (file)
@@ -692,7 +692,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
        if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
                rtl92c_phy_sw_chnl_callback(hw);
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        "sw_chnl_inprogress false schdule workitem\n");
+                        "sw_chnl_inprogress false schedule workitem\n");
                rtlphy->sw_chnl_inprogress = false;
        } else {
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
index 39cc793..3d8536b 100644 (file)
@@ -1106,7 +1106,7 @@ u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw)
        if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
                rtl8723ae_phy_sw_chnl_callback(hw);
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        "sw_chnl_inprogress false schdule workitem\n");
+                        "sw_chnl_inprogress false schedule workitem\n");
                rtlphy->sw_chnl_inprogress = false;
        } else {
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
new file mode 100644 (file)
index 0000000..37ee649
--- /dev/null
@@ -0,0 +1,13 @@
+config NTB
+       tristate "Intel Non-Transparent Bridge support"
+       depends on PCI
+       depends on X86_64
+       help
+        The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+        connecting 2 systems.  When configured, writes to the device's PCI
+        mapped memory will be mirrored to a buffer on the remote system.  The
+        ntb Linux driver uses this point-to-point communication as a method to
+        transfer data from one system to the other.
+
+        If unsure, say N.
+
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
new file mode 100644 (file)
index 0000000..15cb59f
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NTB) += ntb.o
+
+ntb-objs := ntb_hw.o ntb_transport.o
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
new file mode 100644 (file)
index 0000000..f802e7c
--- /dev/null
@@ -0,0 +1,1141 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "ntb_hw.h"
+#include "ntb_regs.h"
+
+#define NTB_NAME       "Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER                "0.25"
+
+MODULE_DESCRIPTION(NTB_NAME);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+enum {
+       NTB_CONN_CLASSIC = 0,
+       NTB_CONN_B2B,
+       NTB_CONN_RP,
+};
+
+enum {
+       NTB_DEV_USD = 0,
+       NTB_DEV_DSD,
+};
+
+enum {
+       SNB_HW = 0,
+       BWD_HW,
+};
+
+/* Translate memory window 0,1 to BAR 2,4 */
+#define MW_TO_BAR(mw)  (mw * 2 + 2)
+
+static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)},
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
+
+/**
+ * ntb_register_event_callback() - register event callback
+ * @ndev: pointer to ntb_device instance
+ * @func: callback function to register
+ *
+ * This function registers a callback for any HW driver events such as link
+ * up/down, power management notices and etc.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_event_callback(struct ntb_device *ndev,
+                           void (*func)(void *handle, enum ntb_hw_event event))
+{
+       if (ndev->event_cb)
+               return -EINVAL;
+
+       ndev->event_cb = func;
+
+       return 0;
+}
+
+/**
+ * ntb_unregister_event_callback() - unregisters the event callback
+ * @ndev: pointer to ntb_device instance
+ *
+ * This function unregisters the existing callback from transport
+ */
+void ntb_unregister_event_callback(struct ntb_device *ndev)
+{
+       ndev->event_cb = NULL;
+}
+
+/**
+ * ntb_register_db_callback() - register a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to register callback, zero based
+ * @func: callback function to register
+ *
+ * This function registers a callback function for the doorbell interrupt
+ * on the primary side. The function will unmask the doorbell as well to
+ * allow interrupt.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+                            void *data, void (*func)(void *data, int db_num))
+{
+       unsigned long mask;
+
+       if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
+               dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
+               return -EINVAL;
+       }
+
+       ndev->db_cb[idx].callback = func;
+       ndev->db_cb[idx].data = data;
+
+       /* unmask interrupt */
+       mask = readw(ndev->reg_ofs.pdb_mask);
+       clear_bit(idx * ndev->bits_per_vector, &mask);
+       writew(mask, ndev->reg_ofs.pdb_mask);
+
+       return 0;
+}
+
+/**
+ * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to register callback, zero based
+ *
+ * This function unregisters a callback function for the doorbell interrupt
+ * on the primary side. The function will also mask the said doorbell.
+ */
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
+{
+       unsigned long mask;
+
+       if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
+               return;
+
+       mask = readw(ndev->reg_ofs.pdb_mask);
+       set_bit(idx * ndev->bits_per_vector, &mask);
+       writew(mask, ndev->reg_ofs.pdb_mask);
+
+       ndev->db_cb[idx].callback = NULL;
+}
+
+/**
+ * ntb_find_transport() - find the transport pointer
+ * @transport: pointer to pci device
+ *
+ * Given the pci device pointer, return the transport pointer passed in when
+ * the transport attached when it was inited.
+ *
+ * RETURNS: pointer to transport.
+ */
+void *ntb_find_transport(struct pci_dev *pdev)
+{
+       struct ntb_device *ndev = pci_get_drvdata(pdev);
+       return ndev->ntb_transport;
+}
+
+/**
+ * ntb_register_transport() - Register NTB transport with NTB HW driver
+ * @transport: transport identifier
+ *
+ * This function allows a transport to reserve the hardware driver for
+ * NTB usage.
+ *
+ * RETURNS: pointer to ntb_device, NULL on error.
+ */
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
+{
+       struct ntb_device *ndev = pci_get_drvdata(pdev);
+
+       if (ndev->ntb_transport)
+               return NULL;
+
+       ndev->ntb_transport = transport;
+       return ndev;
+}
+
+/**
+ * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
+ * @ndev - ntb_device of the transport to be freed
+ *
+ * This function unregisters the transport from the HW driver and performs any
+ * necessary cleanups.
+ */
+void ntb_unregister_transport(struct ntb_device *ndev)
+{
+       int i;
+
+       if (!ndev->ntb_transport)
+               return;
+
+       for (i = 0; i < ndev->max_cbs; i++)
+               ntb_unregister_db_callback(ndev, i);
+
+       ntb_unregister_event_callback(ndev);
+       ndev->ntb_transport = NULL;
+}
+
+/**
+ * ntb_write_local_spad() - write to the secondary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. This writes over the data mirrored to the local scratchpad register
+ * by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+       if (idx >= ndev->limits.max_spads)
+               return -EINVAL;
+
+       dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
+               val, idx);
+       writel(val, ndev->reg_ofs.spad_read + idx * 4);
+
+       return 0;
+}
+
+/**
+ * ntb_read_local_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side.  This allows the local system to read data
+ * written and mirrored to the scratchpad register by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+       if (idx >= ndev->limits.max_spads)
+               return -EINVAL;
+
+       *val = readl(ndev->reg_ofs.spad_write + idx * 4);
+       dev_dbg(&ndev->pdev->dev,
+               "Reading %x from local scratch pad index %d\n", *val, idx);
+
+       return 0;
+}
+
+/**
+ * ntb_write_remote_spad() - write to the secondary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. The register resides on the secondary (external) side.  This allows
+ * the local system to write data to be mirrored to the remote systems
+ * scratchpad register.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+       if (idx >= ndev->limits.max_spads)
+               return -EINVAL;
+
+       dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
+               val, idx);
+       writel(val, ndev->reg_ofs.spad_write + idx * 4);
+
+       return 0;
+}
+
+/**
+ * ntb_read_remote_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side.  This alloows the local system to read the data
+ * it wrote to be mirrored on the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+       if (idx >= ndev->limits.max_spads)
+               return -EINVAL;
+
+       *val = readl(ndev->reg_ofs.spad_read + idx * 4);
+       dev_dbg(&ndev->pdev->dev,
+               "Reading %x from remote scratch pad index %d\n", *val, idx);
+
+       return 0;
+}
+
+/**
+ * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the base virtual address of the memory window
+ * specified.
+ *
+ * RETURNS: pointer to virtual address, or NULL on error.
+ */
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
+{
+       if (mw > NTB_NUM_MW)
+               return NULL;
+
+       return ndev->mw[mw].vbase;
+}
+
+/**
+ * ntb_get_mw_size() - return size of NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the physical size of the memory window specified
+ *
+ * RETURNS: the size of the memory window or zero on error
+ */
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
+{
+       if (mw > NTB_NUM_MW)
+               return 0;
+
+       return ndev->mw[mw].bar_sz;
+}
+
+/**
+ * ntb_set_mw_addr - set the memory window address
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ * @addr: base address for data
+ *
+ * This function sets the base physical address of the memory window.  This
+ * memory address is where data from the remote system will be transfered into
+ * or out of depending on how the transport is configured.
+ */
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
+{
+       if (mw > NTB_NUM_MW)
+               return;
+
+       dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
+               MW_TO_BAR(mw));
+
+       ndev->mw[mw].phys_addr = addr;
+
+       switch (MW_TO_BAR(mw)) {
+       case NTB_BAR_23:
+               writeq(addr, ndev->reg_ofs.sbar2_xlat);
+               break;
+       case NTB_BAR_45:
+               writeq(addr, ndev->reg_ofs.sbar4_xlat);
+               break;
+       }
+}
+
+/**
+ * ntb_ring_sdb() - Set the doorbell on the secondary/external side
+ * @ndev: pointer to ntb_device instance
+ * @db: doorbell to ring
+ *
+ * This function allows triggering of a doorbell on the secondary/external
+ * side that will initiate an interrupt on the remote host
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db)
+{
+       dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
+
+       if (ndev->hw_type == BWD_HW)
+               writeq((u64) 1 << db, ndev->reg_ofs.sdb);
+       else
+               writew(((1 << ndev->bits_per_vector) - 1) <<
+                      (db * ndev->bits_per_vector), ndev->reg_ofs.sdb);
+}
+
+static void ntb_link_event(struct ntb_device *ndev, int link_state)
+{
+       unsigned int event;
+
+       if (ndev->link_status == link_state)
+               return;
+
+       if (link_state == NTB_LINK_UP) {
+               u16 status;
+
+               dev_info(&ndev->pdev->dev, "Link Up\n");
+               ndev->link_status = NTB_LINK_UP;
+               event = NTB_EVENT_HW_LINK_UP;
+
+               if (ndev->hw_type == BWD_HW)
+                       status = readw(ndev->reg_ofs.lnk_stat);
+               else {
+                       int rc = pci_read_config_word(ndev->pdev,
+                                                     SNB_LINK_STATUS_OFFSET,
+                                                     &status);
+                       if (rc)
+                               return;
+               }
+               dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
+                        (status & NTB_LINK_WIDTH_MASK) >> 4,
+                        (status & NTB_LINK_SPEED_MASK));
+       } else {
+               dev_info(&ndev->pdev->dev, "Link Down\n");
+               ndev->link_status = NTB_LINK_DOWN;
+               event = NTB_EVENT_HW_LINK_DOWN;
+       }
+
+       /* notify the upper layer if we have an event change */
+       if (ndev->event_cb)
+               ndev->event_cb(ndev->ntb_transport, event);
+}
+
+static int ntb_link_status(struct ntb_device *ndev)
+{
+       int link_state;
+
+       if (ndev->hw_type == BWD_HW) {
+               u32 ntb_cntl;
+
+               ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+               if (ntb_cntl & BWD_CNTL_LINK_DOWN)
+                       link_state = NTB_LINK_DOWN;
+               else
+                       link_state = NTB_LINK_UP;
+       } else {
+               u16 status;
+               int rc;
+
+               rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
+                                         &status);
+               if (rc)
+                       return rc;
+
+               if (status & NTB_LINK_STATUS_ACTIVE)
+                       link_state = NTB_LINK_UP;
+               else
+                       link_state = NTB_LINK_DOWN;
+       }
+
+       ntb_link_event(ndev, link_state);
+
+       return 0;
+}
+
+/* BWD doesn't have link status interrupt, poll on that platform */
+static void bwd_link_poll(struct work_struct *work)
+{
+       struct ntb_device *ndev = container_of(work, struct ntb_device,
+                                              hb_timer.work);
+       unsigned long ts = jiffies;
+
+       /* If we haven't gotten an interrupt in a while, check the BWD link
+        * status bit
+        */
+       if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
+               int rc = ntb_link_status(ndev);
+               if (rc)
+                       dev_err(&ndev->pdev->dev,
+                               "Error determining link status\n");
+       }
+
+       schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+}
+
+static int ntb_xeon_setup(struct ntb_device *ndev)
+{
+       int rc;
+       u8 val;
+
+       ndev->hw_type = SNB_HW;
+
+       rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &val);
+       if (rc)
+               return rc;
+
+       switch (val & SNB_PPD_CONN_TYPE) {
+       case NTB_CONN_B2B:
+               ndev->conn_type = NTB_CONN_B2B;
+               break;
+       case NTB_CONN_CLASSIC:
+       case NTB_CONN_RP:
+       default:
+               dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+               return -EINVAL;
+       }
+
+       if (val & SNB_PPD_DEV_TYPE)
+               ndev->dev_type = NTB_DEV_DSD;
+       else
+               ndev->dev_type = NTB_DEV_USD;
+
+       ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+       ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
+       ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
+       ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
+       ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
+       ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET;
+       ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+       ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
+
+       if (ndev->conn_type == NTB_CONN_B2B) {
+               ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
+               ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
+               ndev->limits.max_spads = SNB_MAX_SPADS;
+       } else {
+               ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
+               ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
+               ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS;
+       }
+
+       ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
+       ndev->limits.msix_cnt = SNB_MSIX_CNT;
+       ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
+
+       return 0;
+}
+
+static int ntb_bwd_setup(struct ntb_device *ndev)
+{
+       int rc;
+       u32 val;
+
+       ndev->hw_type = BWD_HW;
+
+       rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
+       if (rc)
+               return rc;
+
+       switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
+       case NTB_CONN_B2B:
+               ndev->conn_type = NTB_CONN_B2B;
+               break;
+       case NTB_CONN_RP:
+       default:
+               dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+               return -EINVAL;
+       }
+
+       if (val & BWD_PPD_DEV_TYPE)
+               ndev->dev_type = NTB_DEV_DSD;
+       else
+               ndev->dev_type = NTB_DEV_USD;
+
+       /* Initiate PCI-E link training */
+       rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
+                                   val | BWD_PPD_INIT_LINK);
+       if (rc)
+               return rc;
+
+       ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+       ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
+       ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
+       ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
+       ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
+       ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
+       ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
+       ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
+
+       if (ndev->conn_type == NTB_CONN_B2B) {
+               ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
+               ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
+               ndev->limits.max_spads = BWD_MAX_SPADS;
+       } else {
+               ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+               ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET;
+               ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS;
+       }
+
+       ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
+       ndev->limits.msix_cnt = BWD_MSIX_CNT;
+       ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
+
+       /* Since bwd doesn't have a link interrupt, setup a poll timer */
+       INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
+       schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+
+       return 0;
+}
+
+static int ntb_device_setup(struct ntb_device *ndev)
+{
+       int rc;
+
+       switch (ndev->pdev->device) {
+       case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_RP_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_RP_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+               rc = ntb_xeon_setup(ndev);
+               break;
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+               rc = ntb_bwd_setup(ndev);
+               break;
+       default:
+               rc = -ENODEV;
+       }
+
+       /* Enable Bus Master and Memory Space on the secondary side */
+       writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
+
+       return rc;
+}
+
+static void ntb_device_free(struct ntb_device *ndev)
+{
+       if (ndev->hw_type == BWD_HW)
+               cancel_delayed_work_sync(&ndev->hb_timer);
+}
+
+static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
+{
+       struct ntb_db_cb *db_cb = data;
+       struct ntb_device *ndev = db_cb->ndev;
+
+       dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+               db_cb->db_num);
+
+       if (db_cb->callback)
+               db_cb->callback(db_cb->data, db_cb->db_num);
+
+       /* No need to check for the specific HB irq, any interrupt means
+        * we're connected.
+        */
+       ndev->last_ts = jiffies;
+
+       writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
+{
+       struct ntb_db_cb *db_cb = data;
+       struct ntb_device *ndev = db_cb->ndev;
+
+       dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+               db_cb->db_num);
+
+       if (db_cb->callback)
+               db_cb->callback(db_cb->data, db_cb->db_num);
+
+       /* On Sandybridge, there are 16 bits in the interrupt register
+        * but only 4 vectors.  So, 5 bits are assigned to the first 3
+        * vectors, with the 4th having a single bit for link
+        * interrupts.
+        */
+       writew(((1 << ndev->bits_per_vector) - 1) <<
+              (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb);
+
+       return IRQ_HANDLED;
+}
+
+/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
+static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
+{
+       struct ntb_device *ndev = dev;
+       int rc;
+
+       dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
+
+       rc = ntb_link_status(ndev);
+       if (rc)
+               dev_err(&ndev->pdev->dev, "Error determining link status\n");
+
+       /* bit 15 is always the link bit */
+       writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ntb_interrupt(int irq, void *dev)
+{
+       struct ntb_device *ndev = dev;
+       unsigned int i = 0;
+
+       if (ndev->hw_type == BWD_HW) {
+               u64 pdb = readq(ndev->reg_ofs.pdb);
+
+               dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb);
+
+               while (pdb) {
+                       i = __ffs(pdb);
+                       pdb &= pdb - 1;
+                       bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
+               }
+       } else {
+               u16 pdb = readw(ndev->reg_ofs.pdb);
+
+               dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq,
+                       pdb, readw(ndev->reg_ofs.sdb));
+
+               if (pdb & SNB_DB_HW_LINK) {
+                       xeon_event_msix_irq(irq, dev);
+                       pdb &= ~SNB_DB_HW_LINK;
+               }
+
+               while (pdb) {
+                       i = __ffs(pdb);
+                       pdb &= pdb - 1;
+                       xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+       struct pci_dev *pdev = ndev->pdev;
+       struct msix_entry *msix;
+       int msix_entries;
+       int rc, i, pos;
+       u16 val;
+
+       pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+       if (!pos) {
+               rc = -EIO;
+               goto err;
+       }
+
+       rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val);
+       if (rc)
+               goto err;
+
+       msix_entries = msix_table_size(val);
+       if (msix_entries > ndev->limits.msix_cnt) {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+                                    GFP_KERNEL);
+       if (!ndev->msix_entries) {
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < msix_entries; i++)
+               ndev->msix_entries[i].entry = i;
+
+       rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
+       if (rc < 0)
+               goto err1;
+       if (rc > 0) {
+               /* On SNB, the link interrupt is always tied to 4th vector.  If
+                * we can't get all 4, then we can't use MSI-X.
+                */
+               if (ndev->hw_type != BWD_HW) {
+                       rc = -EIO;
+                       goto err1;
+               }
+
+               dev_warn(&pdev->dev,
+                        "Only %d MSI-X vectors.  Limiting the number of queues to that number.\n",
+                        rc);
+               msix_entries = rc;
+       }
+
+       for (i = 0; i < msix_entries; i++) {
+               msix = &ndev->msix_entries[i];
+               WARN_ON(!msix->vector);
+
+               /* Use the last MSI-X vector for Link status */
+               if (ndev->hw_type == BWD_HW) {
+                       rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+                                        "ntb-callback-msix", &ndev->db_cb[i]);
+                       if (rc)
+                               goto err2;
+               } else {
+                       if (i == msix_entries - 1) {
+                               rc = request_irq(msix->vector,
+                                                xeon_event_msix_irq, 0,
+                                                "ntb-event-msix", ndev);
+                               if (rc)
+                                       goto err2;
+                       } else {
+                               rc = request_irq(msix->vector,
+                                                xeon_callback_msix_irq, 0,
+                                                "ntb-callback-msix",
+                                                &ndev->db_cb[i]);
+                               if (rc)
+                                       goto err2;
+                       }
+               }
+       }
+
+       ndev->num_msix = msix_entries;
+       if (ndev->hw_type == BWD_HW)
+               ndev->max_cbs = msix_entries;
+       else
+               ndev->max_cbs = msix_entries - 1;
+
+       return 0;
+
+err2:
+       while (--i >= 0) {
+               msix = &ndev->msix_entries[i];
+               if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+                       free_irq(msix->vector, ndev);
+               else
+                       free_irq(msix->vector, &ndev->db_cb[i]);
+       }
+       pci_disable_msix(pdev);
+err1:
+       kfree(ndev->msix_entries);
+       dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
+err:
+       ndev->num_msix = 0;
+       return rc;
+}
+
+static int ntb_setup_msi(struct ntb_device *ndev)
+{
+       struct pci_dev *pdev = ndev->pdev;
+       int rc;
+
+       rc = pci_enable_msi(pdev);
+       if (rc)
+               return rc;
+
+       rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
+       if (rc) {
+               pci_disable_msi(pdev);
+               dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
+               return rc;
+       }
+
+       return 0;
+}
+
+static int ntb_setup_intx(struct ntb_device *ndev)
+{
+       struct pci_dev *pdev = ndev->pdev;
+       int rc;
+
+       pci_msi_off(pdev);
+
+       /* Verify intx is enabled */
+       pci_intx(pdev, 1);
+
+       rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
+                        ndev);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int ntb_setup_interrupts(struct ntb_device *ndev)
+{
+       int rc;
+
+       /* On BWD, disable all interrupts.  On SNB, disable all but Link
+        * Interrupt.  The rest will be unmasked as callbacks are registered.
+        */
+       if (ndev->hw_type == BWD_HW)
+               writeq(~0, ndev->reg_ofs.pdb_mask);
+       else
+               writew(~(1 << ndev->limits.max_db_bits),
+                      ndev->reg_ofs.pdb_mask);
+
+       rc = ntb_setup_msix(ndev);
+       if (!rc)
+               goto done;
+
+       ndev->bits_per_vector = 1;
+       ndev->max_cbs = ndev->limits.max_db_bits;
+
+       rc = ntb_setup_msi(ndev);
+       if (!rc)
+               goto done;
+
+       rc = ntb_setup_intx(ndev);
+       if (rc) {
+               dev_err(&ndev->pdev->dev, "no usable interrupts\n");
+               return rc;
+       }
+
+done:
+       return 0;
+}
+
+static void ntb_free_interrupts(struct ntb_device *ndev)
+{
+       struct pci_dev *pdev = ndev->pdev;
+
+       /* mask interrupts */
+       if (ndev->hw_type == BWD_HW)
+               writeq(~0, ndev->reg_ofs.pdb_mask);
+       else
+               writew(~0, ndev->reg_ofs.pdb_mask);
+
+       if (ndev->num_msix) {
+               struct msix_entry *msix;
+               u32 i;
+
+               for (i = 0; i < ndev->num_msix; i++) {
+                       msix = &ndev->msix_entries[i];
+                       if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+                               free_irq(msix->vector, ndev);
+                       else
+                               free_irq(msix->vector, &ndev->db_cb[i]);
+               }
+               pci_disable_msix(pdev);
+       } else {
+               free_irq(pdev->irq, ndev);
+
+               if (pci_dev_msi_enabled(pdev))
+                       pci_disable_msi(pdev);
+       }
+}
+
+static int ntb_create_callbacks(struct ntb_device *ndev)
+{
+       int i;
+
+       /* Checken-egg issue.  We won't know how many callbacks are necessary
+        * until we see how many MSI-X vectors we get, but these pointers need
+        * to be passed into the MSI-X register fucntion.  So, we allocate the
+        * max, knowing that they might not all be used, to work around this.
+        */
+       ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
+                             sizeof(struct ntb_db_cb),
+                             GFP_KERNEL);
+       if (!ndev->db_cb)
+               return -ENOMEM;
+
+       for (i = 0; i < ndev->limits.max_db_bits; i++) {
+               ndev->db_cb[i].db_num = i;
+               ndev->db_cb[i].ndev = ndev;
+       }
+
+       return 0;
+}
+
+static void ntb_free_callbacks(struct ntb_device *ndev)
+{
+       int i;
+
+       for (i = 0; i < ndev->limits.max_db_bits; i++)
+               ntb_unregister_db_callback(ndev, i);
+
+       kfree(ndev->db_cb);
+}
+
+static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct ntb_device *ndev;
+       int rc, i;
+
+       ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
+       if (!ndev)
+               return -ENOMEM;
+
+       ndev->pdev = pdev;
+       ndev->link_status = NTB_LINK_DOWN;
+       pci_set_drvdata(pdev, ndev);
+
+       rc = pci_enable_device(pdev);
+       if (rc)
+               goto err;
+
+       pci_set_master(ndev->pdev);
+
+       rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME);
+       if (rc)
+               goto err1;
+
+       ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
+       if (!ndev->reg_base) {
+               dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
+               rc = -EIO;
+               goto err2;
+       }
+
+       for (i = 0; i < NTB_NUM_MW; i++) {
+               ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
+               ndev->mw[i].vbase =
+                   ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
+                              ndev->mw[i].bar_sz);
+               dev_info(&pdev->dev, "MW %d size %d\n", i,
+                        (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+               if (!ndev->mw[i].vbase) {
+                       dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
+                                MW_TO_BAR(i));
+                       rc = -EIO;
+                       goto err3;
+               }
+       }
+
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc) {
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (rc)
+                       goto err3;
+
+               dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+       }
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc) {
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (rc)
+                       goto err3;
+
+               dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
+       }
+
+       rc = ntb_device_setup(ndev);
+       if (rc)
+               goto err3;
+
+       rc = ntb_create_callbacks(ndev);
+       if (rc)
+               goto err4;
+
+       rc = ntb_setup_interrupts(ndev);
+       if (rc)
+               goto err5;
+
+       /* The scratchpad registers keep the values between rmmod/insmod,
+        * blast them now
+        */
+       for (i = 0; i < ndev->limits.max_spads; i++) {
+               ntb_write_local_spad(ndev, i, 0);
+               ntb_write_remote_spad(ndev, i, 0);
+       }
+
+       rc = ntb_transport_init(pdev);
+       if (rc)
+               goto err6;
+
+       /* Let's bring the NTB link up */
+       writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
+              ndev->reg_ofs.lnk_cntl);
+
+       return 0;
+
+err6:
+       ntb_free_interrupts(ndev);
+err5:
+       ntb_free_callbacks(ndev);
+err4:
+       ntb_device_free(ndev);
+err3:
+       for (i--; i >= 0; i--)
+               iounmap(ndev->mw[i].vbase);
+       iounmap(ndev->reg_base);
+err2:
+       pci_release_selected_regions(pdev, NTB_BAR_MASK);
+err1:
+       pci_disable_device(pdev);
+err:
+       kfree(ndev);
+
+       dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
+       return rc;
+}
+
+static void ntb_pci_remove(struct pci_dev *pdev)
+{
+       struct ntb_device *ndev = pci_get_drvdata(pdev);
+       int i;
+       u32 ntb_cntl;
+
+       /* Bring NTB link down */
+       ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+       ntb_cntl |= NTB_LINK_DISABLE;
+       writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+
+       ntb_transport_free(ndev->ntb_transport);
+
+       ntb_free_interrupts(ndev);
+       ntb_free_callbacks(ndev);
+       ntb_device_free(ndev);
+
+       for (i = 0; i < NTB_NUM_MW; i++)
+               iounmap(ndev->mw[i].vbase);
+
+       iounmap(ndev->reg_base);
+       pci_release_selected_regions(pdev, NTB_BAR_MASK);
+       pci_disable_device(pdev);
+       kfree(ndev);
+}
+
+static struct pci_driver ntb_pci_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = ntb_pci_tbl,
+       .probe = ntb_pci_probe,
+       .remove = ntb_pci_remove,
+};
+module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
new file mode 100644 (file)
index 0000000..3a3038c
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF                0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF    0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF         0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB         0x3C08
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB                0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB    0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB                0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD                0x0C4E
+
+#define msix_table_size(control)       ((control & PCI_MSIX_FLAGS_QSIZE)+1)
+
+#define NTB_BAR_MMIO           0
+#define NTB_BAR_23             2
+#define NTB_BAR_45             4
+#define NTB_BAR_MASK           ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
+                                (1 << NTB_BAR_45))
+
+#define NTB_LINK_DOWN          0
+#define NTB_LINK_UP            1
+
+#define NTB_HB_TIMEOUT         msecs_to_jiffies(1000)
+
+#define NTB_NUM_MW             2
+
+enum ntb_hw_event {
+       NTB_EVENT_SW_EVENT0 = 0,
+       NTB_EVENT_SW_EVENT1,
+       NTB_EVENT_SW_EVENT2,
+       NTB_EVENT_HW_ERROR,
+       NTB_EVENT_HW_LINK_UP,
+       NTB_EVENT_HW_LINK_DOWN,
+};
+
+struct ntb_mw {
+       dma_addr_t phys_addr;
+       void __iomem *vbase;
+       resource_size_t bar_sz;
+};
+
+struct ntb_db_cb {
+       void (*callback) (void *data, int db_num);
+       unsigned int db_num;
+       void *data;
+       struct ntb_device *ndev;
+};
+
+struct ntb_device {
+       struct pci_dev *pdev;
+       struct msix_entry *msix_entries;
+       void __iomem *reg_base;
+       struct ntb_mw mw[NTB_NUM_MW];
+       struct {
+               unsigned int max_spads;
+               unsigned int max_db_bits;
+               unsigned int msix_cnt;
+       } limits;
+       struct {
+               void __iomem *pdb;
+               void __iomem *pdb_mask;
+               void __iomem *sdb;
+               void __iomem *sbar2_xlat;
+               void __iomem *sbar4_xlat;
+               void __iomem *spad_write;
+               void __iomem *spad_read;
+               void __iomem *lnk_cntl;
+               void __iomem *lnk_stat;
+               void __iomem *spci_cmd;
+       } reg_ofs;
+       struct ntb_transport *ntb_transport;
+       void (*event_cb)(void *handle, enum ntb_hw_event event);
+
+       struct ntb_db_cb *db_cb;
+       unsigned char hw_type;
+       unsigned char conn_type;
+       unsigned char dev_type;
+       unsigned char num_msix;
+       unsigned char bits_per_vector;
+       unsigned char max_cbs;
+       unsigned char link_status;
+       struct delayed_work hb_timer;
+       unsigned long last_ts;
+};
+
+/**
+ * ntb_hw_link_status() - return the hardware link status
+ * @ndev: pointer to ntb_device instance
+ *
+ * Returns true if the hardware is connected to the remote system
+ *
+ * RETURNS: true or false based on the hardware link state
+ */
+static inline bool ntb_hw_link_status(struct ntb_device *ndev)
+{
+       return ndev->link_status == NTB_LINK_UP;
+}
+
+/**
+ * ntb_query_pdev() - return the pci_dev pointer
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer return the pci_dev pointerfor the NTB hardware device
+ *
+ * RETURNS: a pointer to the ntb pci_dev
+ */
+static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
+{
+       return ndev->pdev;
+}
+
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
+                                         void *transport);
+void ntb_unregister_transport(struct ntb_device *ndev);
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+                            void *data, void (*db_cb_func) (void *data,
+                                                            int db_num));
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
+int ntb_register_event_callback(struct ntb_device *ndev,
+                               void (*event_cb_func) (void *handle,
+                                                     enum ntb_hw_event event));
+void ntb_unregister_event_callback(struct ntb_device *ndev);
+int ntb_get_max_spads(struct ntb_device *ndev);
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
+void *ntb_find_transport(struct pci_dev *pdev);
+
+int ntb_transport_init(struct pci_dev *pdev);
+void ntb_transport_free(void *transport);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
new file mode 100644 (file)
index 0000000..5bfa8c0
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define NTB_LINK_ENABLE                0x0000
+#define NTB_LINK_DISABLE       0x0002
+#define NTB_LINK_STATUS_ACTIVE 0x2000
+#define NTB_LINK_SPEED_MASK    0x000f
+#define NTB_LINK_WIDTH_MASK    0x03f0
+
+#define SNB_MSIX_CNT           4
+#define SNB_MAX_SPADS          16
+#define SNB_MAX_COMPAT_SPADS   8
+/* Reserve the uppermost bit for link interrupt */
+#define SNB_MAX_DB_BITS                15
+#define SNB_DB_BITS_PER_VEC    5
+
+#define SNB_DB_HW_LINK         0x8000
+
+#define SNB_PCICMD_OFFSET      0x0504
+#define SNB_DEVCTRL_OFFSET     0x0598
+#define SNB_LINK_STATUS_OFFSET 0x01A2
+
+#define SNB_PBAR2LMT_OFFSET    0x0000
+#define SNB_PBAR4LMT_OFFSET    0x0008
+#define SNB_PBAR2XLAT_OFFSET   0x0010
+#define SNB_PBAR4XLAT_OFFSET   0x0018
+#define SNB_SBAR2LMT_OFFSET    0x0020
+#define SNB_SBAR4LMT_OFFSET    0x0028
+#define SNB_SBAR2XLAT_OFFSET   0x0030
+#define SNB_SBAR4XLAT_OFFSET   0x0038
+#define SNB_SBAR0BASE_OFFSET   0x0040
+#define SNB_SBAR2BASE_OFFSET   0x0048
+#define SNB_SBAR4BASE_OFFSET   0x0050
+#define SNB_NTBCNTL_OFFSET     0x0058
+#define SNB_SBDF_OFFSET                0x005C
+#define SNB_PDOORBELL_OFFSET   0x0060
+#define SNB_PDBMSK_OFFSET      0x0062
+#define SNB_SDOORBELL_OFFSET   0x0064
+#define SNB_SDBMSK_OFFSET      0x0066
+#define SNB_USMEMMISS          0x0070
+#define SNB_SPAD_OFFSET                0x0080
+#define SNB_SPADSEMA4_OFFSET   0x00c0
+#define SNB_WCCNTRL_OFFSET     0x00e0
+#define SNB_B2B_SPAD_OFFSET    0x0100
+#define SNB_B2B_DOORBELL_OFFSET        0x0140
+#define SNB_B2B_XLAT_OFFSET    0x0144
+
+#define BWD_MSIX_CNT           34
+#define BWD_MAX_SPADS          16
+#define BWD_MAX_COMPAT_SPADS   16
+#define BWD_MAX_DB_BITS                34
+#define BWD_DB_BITS_PER_VEC    1
+
+#define BWD_PCICMD_OFFSET      0xb004
+#define BWD_MBAR23_OFFSET      0xb018
+#define BWD_MBAR45_OFFSET      0xb020
+#define BWD_DEVCTRL_OFFSET     0xb048
+#define BWD_LINK_STATUS_OFFSET 0xb052
+
+#define BWD_SBAR2XLAT_OFFSET   0x0008
+#define BWD_SBAR4XLAT_OFFSET   0x0010
+#define BWD_PDOORBELL_OFFSET   0x0020
+#define BWD_PDBMSK_OFFSET      0x0028
+#define BWD_NTBCNTL_OFFSET     0x0060
+#define BWD_EBDF_OFFSET                0x0064
+#define BWD_SPAD_OFFSET                0x0080
+#define BWD_SPADSEMA_OFFSET    0x00c0
+#define BWD_STKYSPAD_OFFSET    0x00c4
+#define BWD_PBAR2XLAT_OFFSET   0x8008
+#define BWD_PBAR4XLAT_OFFSET   0x8010
+#define BWD_B2B_DOORBELL_OFFSET        0x8020
+#define BWD_B2B_SPAD_OFFSET    0x8080
+#define BWD_B2B_SPADSEMA_OFFSET        0x80c0
+#define BWD_B2B_STKYSPAD_OFFSET        0x80c4
+
+#define NTB_CNTL_BAR23_SNOOP   (1 << 2)
+#define NTB_CNTL_BAR45_SNOOP   (1 << 6)
+#define BWD_CNTL_LINK_DOWN     (1 << 16)
+
+#define NTB_PPD_OFFSET         0x00D4
+#define SNB_PPD_CONN_TYPE      0x0003
+#define SNB_PPD_DEV_TYPE       0x0010
+#define BWD_PPD_INIT_LINK      0x0008
+#define BWD_PPD_CONN_TYPE      0x0300
+#define BWD_PPD_DEV_TYPE       0x1000
+
+#define BWD_PBAR2XLAT_USD_ADDR 0x0000004000000000
+#define BWD_PBAR4XLAT_USD_ADDR 0x0000008000000000
+#define BWD_MBAR23_USD_ADDR    0x000000410000000C
+#define BWD_MBAR45_USD_ADDR    0x000000810000000C
+#define BWD_PBAR2XLAT_DSD_ADDR 0x0000004100000000
+#define BWD_PBAR4XLAT_DSD_ADDR 0x0000008100000000
+#define BWD_MBAR23_DSD_ADDR    0x000000400000000C
+#define BWD_MBAR45_DSD_ADDR    0x000000800000000C
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
new file mode 100644 (file)
index 0000000..e0bdfd7
--- /dev/null
@@ -0,0 +1,1441 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/ntb.h>
+#include "ntb_hw.h"
+
+#define NTB_TRANSPORT_VERSION  2
+
+static unsigned int transport_mtu = 0x401E;
+module_param(transport_mtu, uint, 0644);
+MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
+
+static unsigned char max_num_clients = 2;
+module_param(max_num_clients, byte, 0644);
+MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
+
+struct ntb_queue_entry {
+       /* ntb_queue list reference */
+       struct list_head entry;
+       /* pointers to data to be transfered */
+       void *cb_data;
+       void *buf;
+       unsigned int len;
+       unsigned int flags;
+};
+
+struct ntb_rx_info {
+       unsigned int entry;
+};
+
+struct ntb_transport_qp {
+       struct ntb_transport *transport;
+       struct ntb_device *ndev;
+       void *cb_data;
+
+       bool client_ready;
+       bool qp_link;
+       u8 qp_num;      /* Only 64 QP's are allowed.  0-63 */
+
+       struct ntb_rx_info __iomem *rx_info;
+       struct ntb_rx_info *remote_rx_info;
+
+       void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
+       struct list_head tx_free_q;
+       spinlock_t ntb_tx_free_q_lock;
+       void __iomem *tx_mw;
+       unsigned int tx_index;
+       unsigned int tx_max_entry;
+       unsigned int tx_max_frame;
+
+       void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
+       struct tasklet_struct rx_work;
+       struct list_head rx_pend_q;
+       struct list_head rx_free_q;
+       spinlock_t ntb_rx_pend_q_lock;
+       spinlock_t ntb_rx_free_q_lock;
+       void *rx_buff;
+       unsigned int rx_index;
+       unsigned int rx_max_entry;
+       unsigned int rx_max_frame;
+
+       void (*event_handler) (void *data, int status);
+       struct delayed_work link_work;
+       struct work_struct link_cleanup;
+
+       struct dentry *debugfs_dir;
+       struct dentry *debugfs_stats;
+
+       /* Stats */
+       u64 rx_bytes;
+       u64 rx_pkts;
+       u64 rx_ring_empty;
+       u64 rx_err_no_buf;
+       u64 rx_err_oflow;
+       u64 rx_err_ver;
+       u64 tx_bytes;
+       u64 tx_pkts;
+       u64 tx_ring_full;
+};
+
+struct ntb_transport_mw {
+       size_t size;
+       void *virt_addr;
+       dma_addr_t dma_addr;
+};
+
+struct ntb_transport_client_dev {
+       struct list_head entry;
+       struct device dev;
+};
+
+struct ntb_transport {
+       struct list_head entry;
+       struct list_head client_devs;
+
+       struct ntb_device *ndev;
+       struct ntb_transport_mw mw[NTB_NUM_MW];
+       struct ntb_transport_qp *qps;
+       unsigned int max_qps;
+       unsigned long qp_bitmap;
+       bool transport_link;
+       struct delayed_work link_work;
+       struct work_struct link_cleanup;
+       struct dentry *debugfs_dir;
+};
+
+enum {
+       DESC_DONE_FLAG = 1 << 0,
+       LINK_DOWN_FLAG = 1 << 1,
+};
+
+struct ntb_payload_header {
+       unsigned int ver;
+       unsigned int len;
+       unsigned int flags;
+};
+
+enum {
+       VERSION = 0,
+       MW0_SZ,
+       MW1_SZ,
+       NUM_QPS,
+       QP_LINKS,
+       MAX_SPAD,
+};
+
+#define QP_TO_MW(qp)           ((qp) % NTB_NUM_MW)
+#define NTB_QP_DEF_NUM_ENTRIES 100
+#define NTB_LINK_DOWN_TIMEOUT  10
+
+static int ntb_match_bus(struct device *dev, struct device_driver *drv)
+{
+       return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
+}
+
+static int ntb_client_probe(struct device *dev)
+{
+       const struct ntb_client *drv = container_of(dev->driver,
+                                                   struct ntb_client, driver);
+       struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+       int rc = -EINVAL;
+
+       get_device(dev);
+       if (drv && drv->probe)
+               rc = drv->probe(pdev);
+       if (rc)
+               put_device(dev);
+
+       return rc;
+}
+
+static int ntb_client_remove(struct device *dev)
+{
+       const struct ntb_client *drv = container_of(dev->driver,
+                                                   struct ntb_client, driver);
+       struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+
+       if (drv && drv->remove)
+               drv->remove(pdev);
+
+       put_device(dev);
+
+       return 0;
+}
+
+static struct bus_type ntb_bus_type = {
+       .name = "ntb_bus",
+       .match = ntb_match_bus,
+       .probe = ntb_client_probe,
+       .remove = ntb_client_remove,
+};
+
+static LIST_HEAD(ntb_transport_list);
+
+static int ntb_bus_init(struct ntb_transport *nt)
+{
+       if (list_empty(&ntb_transport_list)) {
+               int rc = bus_register(&ntb_bus_type);
+               if (rc)
+                       return rc;
+       }
+
+       list_add(&nt->entry, &ntb_transport_list);
+
+       return 0;
+}
+
+static void ntb_bus_remove(struct ntb_transport *nt)
+{
+       struct ntb_transport_client_dev *client_dev, *cd;
+
+       list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
+               dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
+                       dev_name(&client_dev->dev));
+               list_del(&client_dev->entry);
+               device_unregister(&client_dev->dev);
+       }
+
+       list_del(&nt->entry);
+
+       if (list_empty(&ntb_transport_list))
+               bus_unregister(&ntb_bus_type);
+}
+
+static void ntb_client_release(struct device *dev)
+{
+       struct ntb_transport_client_dev *client_dev;
+       client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
+
+       kfree(client_dev);
+}
+
+/**
+ * ntb_unregister_client_dev - Unregister NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Unregister an NTB client device with the NTB transport layer
+ */
+void ntb_unregister_client_dev(char *device_name)
+{
+       struct ntb_transport_client_dev *client, *cd;
+       struct ntb_transport *nt;
+
+       list_for_each_entry(nt, &ntb_transport_list, entry)
+               list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
+                       if (!strncmp(dev_name(&client->dev), device_name,
+                                    strlen(device_name))) {
+                               list_del(&client->entry);
+                               device_unregister(&client->dev);
+                       }
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
+
+/**
+ * ntb_register_client_dev - Register NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Register an NTB client device with the NTB transport layer
+ */
+int ntb_register_client_dev(char *device_name)
+{
+       struct ntb_transport_client_dev *client_dev;
+       struct ntb_transport *nt;
+       int rc;
+
+       if (list_empty(&ntb_transport_list))
+               return -ENODEV;
+
+       list_for_each_entry(nt, &ntb_transport_list, entry) {
+               struct device *dev;
+
+               client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
+                                    GFP_KERNEL);
+               if (!client_dev) {
+                       rc = -ENOMEM;
+                       goto err;
+               }
+
+               dev = &client_dev->dev;
+
+               /* setup and register client devices */
+               dev_set_name(dev, "%s", device_name);
+               dev->bus = &ntb_bus_type;
+               dev->release = ntb_client_release;
+               dev->parent = &ntb_query_pdev(nt->ndev)->dev;
+
+               rc = device_register(dev);
+               if (rc) {
+                       kfree(client_dev);
+                       goto err;
+               }
+
+               list_add_tail(&client_dev->entry, &nt->client_devs);
+       }
+
+       return 0;
+
+err:
+       ntb_unregister_client_dev(device_name);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_register_client_dev);
+
+/**
+ * ntb_register_client - Register NTB client driver
+ * @drv: NTB client driver to be registered
+ *
+ * Register an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_client(struct ntb_client *drv)
+{
+       drv->driver.bus = &ntb_bus_type;
+
+       if (list_empty(&ntb_transport_list))
+               return -ENODEV;
+
+       return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_register_client);
+
+/**
+ * ntb_unregister_client - Unregister NTB client driver
+ * @drv: NTB client driver to be unregistered
+ *
+ * Unregister an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+void ntb_unregister_client(struct ntb_client *drv)
+{
+       driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client);
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
+                           loff_t *offp)
+{
+       struct ntb_transport_qp *qp;
+       char *buf;
+       ssize_t ret, out_offset, out_count;
+
+       out_count = 600;
+
+       buf = kmalloc(out_count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       qp = filp->private_data;
+       out_offset = 0;
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "NTB QP stats\n");
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_bytes - \t%llu\n", qp->rx_bytes);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_pkts - \t%llu\n", qp->rx_pkts);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_ring_empty - %llu\n", qp->rx_ring_empty);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_err_ver - \t%llu\n", qp->rx_err_ver);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_buff - \t%p\n", qp->rx_buff);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_index - \t%u\n", qp->rx_index);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "rx_max_entry - \t%u\n", qp->rx_max_entry);
+
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "tx_bytes - \t%llu\n", qp->tx_bytes);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "tx_pkts - \t%llu\n", qp->tx_pkts);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "tx_ring_full - \t%llu\n", qp->tx_ring_full);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "tx_mw - \t%p\n", qp->tx_mw);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "tx_index - \t%u\n", qp->tx_index);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "tx_max_entry - \t%u\n", qp->tx_max_entry);
+
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
+                              "Up" : "Down");
+       if (out_offset > out_count)
+               out_offset = out_count;
+
+       ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+       kfree(buf);
+       return ret;
+}
+
+static const struct file_operations ntb_qp_debugfs_stats = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = debugfs_read,
+};
+
+static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
+                        struct list_head *list)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+       list_add_tail(entry, list);
+       spin_unlock_irqrestore(lock, flags);
+}
+
+static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
+                                               struct list_head *list)
+{
+       struct ntb_queue_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+       if (list_empty(list)) {
+               entry = NULL;
+               goto out;
+       }
+       entry = list_first_entry(list, struct ntb_queue_entry, entry);
+       list_del(&entry->entry);
+out:
+       spin_unlock_irqrestore(lock, flags);
+
+       return entry;
+}
+
+static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
+                                     unsigned int qp_num)
+{
+       struct ntb_transport_qp *qp = &nt->qps[qp_num];
+       unsigned int rx_size, num_qps_mw;
+       u8 mw_num = QP_TO_MW(qp_num);
+       unsigned int i;
+
+       WARN_ON(nt->mw[mw_num].virt_addr == NULL);
+
+       if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+               num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+       else
+               num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+       rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
+       qp->remote_rx_info = nt->mw[mw_num].virt_addr +
+                            (qp_num / NTB_NUM_MW * rx_size);
+       rx_size -= sizeof(struct ntb_rx_info);
+
+       qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
+       qp->rx_max_frame = min(transport_mtu, rx_size);
+       qp->rx_max_entry = rx_size / qp->rx_max_frame;
+       qp->rx_index = 0;
+
+       qp->remote_rx_info->entry = qp->rx_max_entry;
+
+       /* setup the hdr offsets with 0's */
+       for (i = 0; i < qp->rx_max_entry; i++) {
+               void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
+                              sizeof(struct ntb_payload_header);
+               memset(offset, 0, sizeof(struct ntb_payload_header));
+       }
+
+       qp->rx_pkts = 0;
+       qp->tx_pkts = 0;
+}
+
+static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
+{
+       struct ntb_transport_mw *mw = &nt->mw[num_mw];
+       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+       /* Alloc memory for receiving data.  Must be 4k aligned */
+       mw->size = ALIGN(size, 4096);
+
+       mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
+                                          GFP_KERNEL);
+       if (!mw->virt_addr) {
+               dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
+                      (int) mw->size);
+               return -ENOMEM;
+       }
+
+       /* Notify HW the memory location of the receive buffer */
+       ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
+
+       return 0;
+}
+
+static void ntb_qp_link_cleanup(struct work_struct *work)
+{
+       struct ntb_transport_qp *qp = container_of(work,
+                                                  struct ntb_transport_qp,
+                                                  link_cleanup);
+       struct ntb_transport *nt = qp->transport;
+       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+       if (qp->qp_link == NTB_LINK_DOWN) {
+               cancel_delayed_work_sync(&qp->link_work);
+               return;
+       }
+
+       if (qp->event_handler)
+               qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
+
+       dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+       qp->qp_link = NTB_LINK_DOWN;
+
+       if (nt->transport_link == NTB_LINK_UP)
+               schedule_delayed_work(&qp->link_work,
+                                     msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_down(struct ntb_transport_qp *qp)
+{
+       schedule_work(&qp->link_cleanup);
+}
+
+static void ntb_transport_link_cleanup(struct work_struct *work)
+{
+       struct ntb_transport *nt = container_of(work, struct ntb_transport,
+                                               link_cleanup);
+       int i;
+
+       if (nt->transport_link == NTB_LINK_DOWN)
+               cancel_delayed_work_sync(&nt->link_work);
+       else
+               nt->transport_link = NTB_LINK_DOWN;
+
+       /* Pass along the info to any clients */
+       for (i = 0; i < nt->max_qps; i++)
+               if (!test_bit(i, &nt->qp_bitmap))
+                       ntb_qp_link_down(&nt->qps[i]);
+
+       /* The scratchpad registers keep the values if the remote side
+        * goes down, blast them now to give them a sane value the next
+        * time they are accessed
+        */
+       for (i = 0; i < MAX_SPAD; i++)
+               ntb_write_local_spad(nt->ndev, i, 0);
+}
+
+static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
+{
+       struct ntb_transport *nt = data;
+
+       switch (event) {
+       case NTB_EVENT_HW_LINK_UP:
+               schedule_delayed_work(&nt->link_work, 0);
+               break;
+       case NTB_EVENT_HW_LINK_DOWN:
+               schedule_work(&nt->link_cleanup);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static void ntb_transport_link_work(struct work_struct *work)
+{
+       struct ntb_transport *nt = container_of(work, struct ntb_transport,
+                                               link_work.work);
+       struct ntb_device *ndev = nt->ndev;
+       struct pci_dev *pdev = ntb_query_pdev(ndev);
+       u32 val;
+       int rc, i;
+
+       /* send the local info */
+       rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
+       if (rc) {
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       0, VERSION);
+               goto out;
+       }
+
+       rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
+       if (rc) {
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
+               goto out;
+       }
+
+       rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+       if (rc) {
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+               goto out;
+       }
+
+       rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
+       if (rc) {
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       nt->max_qps, NUM_QPS);
+               goto out;
+       }
+
+       rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+               goto out;
+       }
+
+       rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       val, QP_LINKS);
+               goto out;
+       }
+
+       /* Query the remote side for its info */
+       rc = ntb_read_remote_spad(ndev, VERSION, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
+               goto out;
+       }
+
+       if (val != NTB_TRANSPORT_VERSION)
+               goto out;
+       dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+
+       rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
+               goto out;
+       }
+
+       if (val != nt->max_qps)
+               goto out;
+       dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
+
+       rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+               goto out;
+       }
+
+       if (!val)
+               goto out;
+       dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+
+       rc = ntb_set_mw(nt, 0, val);
+       if (rc)
+               goto out;
+
+       rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
+               goto out;
+       }
+
+       if (!val)
+               goto out;
+       dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+
+       rc = ntb_set_mw(nt, 1, val);
+       if (rc)
+               goto out;
+
+       nt->transport_link = NTB_LINK_UP;
+
+       for (i = 0; i < nt->max_qps; i++) {
+               struct ntb_transport_qp *qp = &nt->qps[i];
+
+               ntb_transport_setup_qp_mw(nt, i);
+
+               if (qp->client_ready == NTB_LINK_UP)
+                       schedule_delayed_work(&qp->link_work, 0);
+       }
+
+       return;
+
+out:
+       if (ntb_hw_link_status(ndev))
+               schedule_delayed_work(&nt->link_work,
+                                     msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_work(struct work_struct *work)
+{
+       struct ntb_transport_qp *qp = container_of(work,
+                                                  struct ntb_transport_qp,
+                                                  link_work.work);
+       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct ntb_transport *nt = qp->transport;
+       int rc, val;
+
+       WARN_ON(nt->transport_link != NTB_LINK_UP);
+
+       rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+               return;
+       }
+
+       rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
+       if (rc)
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       val | 1 << qp->qp_num, QP_LINKS);
+
+       /* query remote spad for qp ready bits */
+       rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
+       if (rc)
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
+
+       dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+
+       /* See if the remote side is up */
+       if (1 << qp->qp_num & val) {
+               qp->qp_link = NTB_LINK_UP;
+
+               dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+               if (qp->event_handler)
+                       qp->event_handler(qp->cb_data, NTB_LINK_UP);
+       } else if (nt->transport_link == NTB_LINK_UP)
+               schedule_delayed_work(&qp->link_work,
+                                     msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_transport_init_queue(struct ntb_transport *nt,
+                                    unsigned int qp_num)
+{
+       struct ntb_transport_qp *qp;
+       unsigned int num_qps_mw, tx_size;
+       u8 mw_num = QP_TO_MW(qp_num);
+
+       qp = &nt->qps[qp_num];
+       qp->qp_num = qp_num;
+       qp->transport = nt;
+       qp->ndev = nt->ndev;
+       qp->qp_link = NTB_LINK_DOWN;
+       qp->client_ready = NTB_LINK_DOWN;
+       qp->event_handler = NULL;
+
+       if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+               num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+       else
+               num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+       tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
+       qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
+                     (qp_num / NTB_NUM_MW * tx_size);
+       tx_size -= sizeof(struct ntb_rx_info);
+
+       qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
+       qp->tx_max_frame = min(transport_mtu, tx_size);
+       qp->tx_max_entry = tx_size / qp->tx_max_frame;
+       qp->tx_index = 0;
+
+       if (nt->debugfs_dir) {
+               char debugfs_name[4];
+
+               snprintf(debugfs_name, 4, "qp%d", qp_num);
+               qp->debugfs_dir = debugfs_create_dir(debugfs_name,
+                                                    nt->debugfs_dir);
+
+               qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
+                                                       qp->debugfs_dir, qp,
+                                                       &ntb_qp_debugfs_stats);
+       }
+
+       INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
+       INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+
+       spin_lock_init(&qp->ntb_rx_pend_q_lock);
+       spin_lock_init(&qp->ntb_rx_free_q_lock);
+       spin_lock_init(&qp->ntb_tx_free_q_lock);
+
+       INIT_LIST_HEAD(&qp->rx_pend_q);
+       INIT_LIST_HEAD(&qp->rx_free_q);
+       INIT_LIST_HEAD(&qp->tx_free_q);
+}
+
+int ntb_transport_init(struct pci_dev *pdev)
+{
+       struct ntb_transport *nt;
+       int rc, i;
+
+       nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
+       if (!nt)
+               return -ENOMEM;
+
+       if (debugfs_initialized())
+               nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+       else
+               nt->debugfs_dir = NULL;
+
+       nt->ndev = ntb_register_transport(pdev, nt);
+       if (!nt->ndev) {
+               rc = -EIO;
+               goto err;
+       }
+
+       nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
+
+       nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
+                         GFP_KERNEL);
+       if (!nt->qps) {
+               rc = -ENOMEM;
+               goto err1;
+       }
+
+       nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
+
+       for (i = 0; i < nt->max_qps; i++)
+               ntb_transport_init_queue(nt, i);
+
+       INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
+       INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+
+       rc = ntb_register_event_callback(nt->ndev,
+                                        ntb_transport_event_callback);
+       if (rc)
+               goto err2;
+
+       INIT_LIST_HEAD(&nt->client_devs);
+       rc = ntb_bus_init(nt);
+       if (rc)
+               goto err3;
+
+       if (ntb_hw_link_status(nt->ndev))
+               schedule_delayed_work(&nt->link_work, 0);
+
+       return 0;
+
+err3:
+       ntb_unregister_event_callback(nt->ndev);
+err2:
+       kfree(nt->qps);
+err1:
+       ntb_unregister_transport(nt->ndev);
+err:
+       debugfs_remove_recursive(nt->debugfs_dir);
+       kfree(nt);
+       return rc;
+}
+
+void ntb_transport_free(void *transport)
+{
+       struct ntb_transport *nt = transport;
+       struct pci_dev *pdev;
+       int i;
+
+       nt->transport_link = NTB_LINK_DOWN;
+
+       /* verify that all the qp's are freed */
+       for (i = 0; i < nt->max_qps; i++)
+               if (!test_bit(i, &nt->qp_bitmap))
+                       ntb_transport_free_queue(&nt->qps[i]);
+
+       ntb_bus_remove(nt);
+
+       cancel_delayed_work_sync(&nt->link_work);
+
+       debugfs_remove_recursive(nt->debugfs_dir);
+
+       ntb_unregister_event_callback(nt->ndev);
+
+       pdev = ntb_query_pdev(nt->ndev);
+
+       for (i = 0; i < NTB_NUM_MW; i++)
+               if (nt->mw[i].virt_addr)
+                       dma_free_coherent(&pdev->dev, nt->mw[i].size,
+                                         nt->mw[i].virt_addr,
+                                         nt->mw[i].dma_addr);
+
+       kfree(nt->qps);
+       ntb_unregister_transport(nt->ndev);
+       kfree(nt);
+}
+
+static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
+                            struct ntb_queue_entry *entry, void *offset)
+{
+       void *cb_data = entry->cb_data;
+       unsigned int len = entry->len;
+
+       memcpy(entry->buf, offset, entry->len);
+
+       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+       if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
+               qp->rx_handler(qp, qp->cb_data, cb_data, len);
+}
+
+static int ntb_process_rxc(struct ntb_transport_qp *qp)
+{
+       struct ntb_payload_header *hdr;
+       struct ntb_queue_entry *entry;
+       void *offset;
+
+       offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
+       hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
+
+       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       if (!entry) {
+               dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+                       "no buffer - HDR ver %u, len %d, flags %x\n",
+                       hdr->ver, hdr->len, hdr->flags);
+               qp->rx_err_no_buf++;
+               return -ENOMEM;
+       }
+
+       if (!(hdr->flags & DESC_DONE_FLAG)) {
+               ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+                            &qp->rx_pend_q);
+               qp->rx_ring_empty++;
+               return -EAGAIN;
+       }
+
+       if (hdr->ver != (u32) qp->rx_pkts) {
+               dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+                       "qp %d: version mismatch, expected %llu - got %u\n",
+                       qp->qp_num, qp->rx_pkts, hdr->ver);
+               ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+                            &qp->rx_pend_q);
+               qp->rx_err_ver++;
+               return -EIO;
+       }
+
+       if (hdr->flags & LINK_DOWN_FLAG) {
+               ntb_qp_link_down(qp);
+
+               ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+                            &qp->rx_pend_q);
+               goto out;
+       }
+
+       dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+               "rx offset %u, ver %u - %d payload received, buf size %d\n",
+               qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+       if (hdr->len <= entry->len) {
+               entry->len = hdr->len;
+               ntb_rx_copy_task(qp, entry, offset);
+       } else {
+               ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+                            &qp->rx_pend_q);
+
+               qp->rx_err_oflow++;
+               dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+                       "RX overflow! Wanted %d got %d\n",
+                       hdr->len, entry->len);
+       }
+
+       qp->rx_bytes += hdr->len;
+       qp->rx_pkts++;
+
+out:
+       /* Ensure that the data is fully copied out before clearing the flag */
+       wmb();
+       hdr->flags = 0;
+       iowrite32(qp->rx_index, &qp->rx_info->entry);
+
+       qp->rx_index++;
+       qp->rx_index %= qp->rx_max_entry;
+
+       return 0;
+}
+
+static void ntb_transport_rx(unsigned long data)
+{
+       struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+       int rc;
+
+       do {
+               rc = ntb_process_rxc(qp);
+       } while (!rc);
+}
+
+static void ntb_transport_rxc_db(void *data, int db_num)
+{
+       struct ntb_transport_qp *qp = data;
+
+       dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+               __func__, db_num);
+
+       tasklet_schedule(&qp->rx_work);
+}
+
+static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
+                            struct ntb_queue_entry *entry,
+                            void __iomem *offset)
+{
+       struct ntb_payload_header __iomem *hdr;
+
+       memcpy_toio(offset, entry->buf, entry->len);
+
+       hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+       iowrite32(entry->len, &hdr->len);
+       iowrite32((u32) qp->tx_pkts, &hdr->ver);
+
+       /* Ensure that the data is fully copied out before setting the flag */
+       wmb();
+       iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
+
+       ntb_ring_sdb(qp->ndev, qp->qp_num);
+
+       /* The entry length can only be zero if the packet is intended to be a
+        * "link down" or similar.  Since no payload is being sent in these
+        * cases, there is nothing to add to the completion queue.
+        */
+       if (entry->len > 0) {
+               qp->tx_bytes += entry->len;
+
+               if (qp->tx_handler)
+                       qp->tx_handler(qp, qp->cb_data, entry->cb_data,
+                                      entry->len);
+       }
+
+       ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
+}
+
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+                         struct ntb_queue_entry *entry)
+{
+       void __iomem *offset;
+
+       offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+
+       dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
+               qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
+               entry->buf);
+       if (qp->tx_index == qp->remote_rx_info->entry) {
+               qp->tx_ring_full++;
+               return -EAGAIN;
+       }
+
+       if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
+               if (qp->tx_handler)
+                       qp->tx_handler(qp->cb_data, qp, NULL, -EIO);
+
+               ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+                            &qp->tx_free_q);
+               return 0;
+       }
+
+       ntb_tx_copy_task(qp, entry, offset);
+
+       qp->tx_index++;
+       qp->tx_index %= qp->tx_max_entry;
+
+       qp->tx_pkts++;
+
+       return 0;
+}
+
+static void ntb_send_link_down(struct ntb_transport_qp *qp)
+{
+       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct ntb_queue_entry *entry;
+       int i, rc;
+
+       if (qp->qp_link == NTB_LINK_DOWN)
+               return;
+
+       qp->qp_link = NTB_LINK_DOWN;
+       dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+
+       for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
+               entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+               if (entry)
+                       break;
+               msleep(100);
+       }
+
+       if (!entry)
+               return;
+
+       entry->cb_data = NULL;
+       entry->buf = NULL;
+       entry->len = 0;
+       entry->flags = LINK_DOWN_FLAG;
+
+       rc = ntb_process_tx(qp, entry);
+       if (rc)
+               dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
+                       qp->qp_num);
+}
+
+/**
+ * ntb_transport_create_queue - Create a new NTB transport layer queue
+ * @rx_handler: receive callback function
+ * @tx_handler: transmit callback function
+ * @event_handler: event callback function
+ *
+ * Create a new NTB transport layer queue and provide the queue with a callback
+ * routine for both transmit and receive.  The receive callback routine will be
+ * used to pass up data when the transport has received it on the queue.   The
+ * transmit callback routine will be called when the transport has completed the
+ * transmission of the data on the queue and the data is ready to be freed.
+ *
+ * RETURNS: pointer to newly created ntb_queue, NULL on error.
+ */
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+                          const struct ntb_queue_handlers *handlers)
+{
+       struct ntb_queue_entry *entry;
+       struct ntb_transport_qp *qp;
+       struct ntb_transport *nt;
+       unsigned int free_queue;
+       int rc, i;
+
+       nt = ntb_find_transport(pdev);
+       if (!nt)
+               goto err;
+
+       free_queue = ffs(nt->qp_bitmap);
+       if (!free_queue)
+               goto err;
+
+       /* decrement free_queue to make it zero based */
+       free_queue--;
+
+       clear_bit(free_queue, &nt->qp_bitmap);
+
+       qp = &nt->qps[free_queue];
+       qp->cb_data = data;
+       qp->rx_handler = handlers->rx_handler;
+       qp->tx_handler = handlers->tx_handler;
+       qp->event_handler = handlers->event_handler;
+
+       for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+               entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+               if (!entry)
+                       goto err1;
+
+               ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+                            &qp->rx_free_q);
+       }
+
+       for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+               entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+               if (!entry)
+                       goto err2;
+
+               ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+                            &qp->tx_free_q);
+       }
+
+       tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
+
+       rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
+                                     ntb_transport_rxc_db);
+       if (rc)
+               goto err3;
+
+       dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
+
+       return qp;
+
+err3:
+       tasklet_disable(&qp->rx_work);
+err2:
+       while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+               kfree(entry);
+err1:
+       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+               kfree(entry);
+       set_bit(free_queue, &nt->qp_bitmap);
+err:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
+
+/**
+ * ntb_transport_free_queue - Frees NTB transport queue
+ * @qp: NTB queue to be freed
+ *
+ * Frees NTB transport queue
+ */
+void ntb_transport_free_queue(struct ntb_transport_qp *qp)
+{
+       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct ntb_queue_entry *entry;
+
+       if (!qp)
+               return;
+
+       cancel_delayed_work_sync(&qp->link_work);
+
+       ntb_unregister_db_callback(qp->ndev, qp->qp_num);
+       tasklet_disable(&qp->rx_work);
+
+       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+               kfree(entry);
+
+       while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
+               dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+               kfree(entry);
+       }
+
+       while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+               kfree(entry);
+
+       set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+
+       dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
+
+/**
+ * ntb_transport_rx_remove - Dequeues enqueued rx packet
+ * @qp: NTB queue to be freed
+ * @len: pointer to variable to write enqueued buffers length
+ *
+ * Dequeues unused buffers from receive queue.  Should only be used during
+ * shutdown of qp.
+ *
+ * RETURNS: NULL error value on error, or void* for success.
+ */
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
+{
+       struct ntb_queue_entry *entry;
+       void *buf;
+
+       if (!qp || qp->client_ready == NTB_LINK_UP)
+               return NULL;
+
+       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       if (!entry)
+               return NULL;
+
+       buf = entry->cb_data;
+       *len = entry->len;
+
+       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+       return buf;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
+
+/**
+ * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that incoming packets will be copied into
+ * @len: length of the data buffer
+ *
+ * Enqueue a new receive buffer onto the transport queue into which a NTB
+ * payload can be received into.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+                            unsigned int len)
+{
+       struct ntb_queue_entry *entry;
+
+       if (!qp)
+               return -EINVAL;
+
+       entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->cb_data = cb;
+       entry->buf = data;
+       entry->len = len;
+
+       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
+
+/**
+ * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that will be sent
+ * @len: length of the data buffer
+ *
+ * Enqueue a new transmit buffer onto the transport queue from which a NTB
+ * payload will be transmitted.  This assumes that a lock is behing held to
+ * serialize access to the qp.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+                            unsigned int len)
+{
+       struct ntb_queue_entry *entry;
+       int rc;
+
+       if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+               return -EINVAL;
+
+       entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->cb_data = cb;
+       entry->buf = data;
+       entry->len = len;
+       entry->flags = 0;
+
+       rc = ntb_process_tx(qp, entry);
+       if (rc)
+               ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+                            &qp->tx_free_q);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
+
+/**
+ * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
+ * @qp: NTB transport layer queue to be enabled
+ *
+ * Notify NTB transport layer of client readiness to use queue
+ */
+void ntb_transport_link_up(struct ntb_transport_qp *qp)
+{
+       if (!qp)
+               return;
+
+       qp->client_ready = NTB_LINK_UP;
+
+       if (qp->transport->transport_link == NTB_LINK_UP)
+               schedule_delayed_work(&qp->link_work, 0);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_up);
+
+/**
+ * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
+ * @qp: NTB transport layer queue to be disabled
+ *
+ * Notify NTB transport layer of client's desire to no longer receive data on
+ * transport queue specified.  It is the client's responsibility to ensure all
+ * entries on queue are purged or otherwise handled appropraitely.
+ */
+void ntb_transport_link_down(struct ntb_transport_qp *qp)
+{
+       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       int rc, val;
+
+       if (!qp)
+               return;
+
+       qp->client_ready = NTB_LINK_DOWN;
+
+       rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
+       if (rc) {
+               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+               return;
+       }
+
+       rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
+                                  val & ~(1 << qp->qp_num));
+       if (rc)
+               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+                       val & ~(1 << qp->qp_num), QP_LINKS);
+
+       if (qp->qp_link == NTB_LINK_UP)
+               ntb_send_link_down(qp);
+       else
+               cancel_delayed_work_sync(&qp->link_work);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_down);
+
+/**
+ * ntb_transport_link_query - Query transport link state
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query connectivity to the remote system of the NTB transport queue
+ *
+ * RETURNS: true for link up or false for link down
+ */
+bool ntb_transport_link_query(struct ntb_transport_qp *qp)
+{
+       return qp->qp_link == NTB_LINK_UP;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_query);
+
+/**
+ * ntb_transport_qp_num - Query the qp number
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query qp number of the NTB transport queue
+ *
+ * RETURNS: a zero based number specifying the qp number
+ */
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
+{
+       return qp->qp_num;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
+
+/**
+ * ntb_transport_max_size - Query the max payload size of a qp
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query the maximum payload size permissible on the given qp
+ *
+ * RETURNS: the max payload size of a qp
+ */
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
+{
+       return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_max_size);
index 26ffd3e..2c113de 100644 (file)
@@ -44,7 +44,6 @@ extern bool pciehp_poll_mode;
 extern int pciehp_poll_time;
 extern bool pciehp_debug;
 extern bool pciehp_force;
-extern struct workqueue_struct *pciehp_wq;
 
 #define dbg(format, arg...)                                            \
 do {                                                                   \
@@ -78,6 +77,7 @@ struct slot {
        struct hotplug_slot *hotplug_slot;
        struct delayed_work work;       /* work for button event */
        struct mutex lock;
+       struct workqueue_struct *wq;
 };
 
 struct event_info {
index 916bf4f..939bd1d 100644 (file)
@@ -42,7 +42,6 @@ bool pciehp_debug;
 bool pciehp_poll_mode;
 int pciehp_poll_time;
 bool pciehp_force;
-struct workqueue_struct *pciehp_wq;
 
 #define DRIVER_VERSION "0.4"
 #define DRIVER_AUTHOR  "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -340,18 +339,13 @@ static int __init pcied_init(void)
 {
        int retval = 0;
 
-       pciehp_wq = alloc_workqueue("pciehp", 0, 0);
-       if (!pciehp_wq)
-               return -ENOMEM;
-
        pciehp_firmware_init();
        retval = pcie_port_service_register(&hpdriver_portdrv);
        dbg("pcie_port_service_register = %d\n", retval);
        info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-       if (retval) {
-               destroy_workqueue(pciehp_wq);
+       if (retval)
                dbg("Failure to register service\n");
-       }
+
        return retval;
 }
 
@@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void)
 {
        dbg("unload_pciehpd()\n");
        pcie_port_service_unregister(&hpdriver_portdrv);
-       destroy_workqueue(pciehp_wq);
        info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
 }
 
index 27f4429..38f0186 100644 (file)
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
        info->p_slot = p_slot;
        INIT_WORK(&info->work, interrupt_event_handler);
 
-       queue_work(pciehp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
 
        return 0;
 }
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
                kfree(info);
                goto out;
        }
-       queue_work(pciehp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
  out:
        mutex_unlock(&p_slot->lock);
 }
@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
                if (ATTN_LED(ctrl))
                        pciehp_set_attention_status(p_slot, 0);
 
-               queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
+               queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
                break;
        case BLINKINGOFF_STATE:
        case BLINKINGON_STATE:
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
        else
                p_slot->state = POWERON_STATE;
 
-       queue_work(pciehp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
 }
 
 static void interrupt_event_handler(struct work_struct *work)
index 13b2eaf..5127f3f 100644 (file)
@@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
 static int pcie_init_slot(struct controller *ctrl)
 {
        struct slot *slot;
+       char name[32];
 
        slot = kzalloc(sizeof(*slot), GFP_KERNEL);
        if (!slot)
                return -ENOMEM;
 
+       snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
+       slot->wq = alloc_workqueue(name, 0, 0);
+       if (!slot->wq)
+               goto abort;
+
        slot->ctrl = ctrl;
        mutex_init(&slot->lock);
        INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
        ctrl->slot = slot;
        return 0;
+abort:
+       kfree(slot);
+       return -ENOMEM;
 }
 
 static void pcie_cleanup_slot(struct controller *ctrl)
 {
        struct slot *slot = ctrl->slot;
        cancel_delayed_work(&slot->work);
-       flush_workqueue(pciehp_wq);
+       destroy_workqueue(slot->wq);
        kfree(slot);
 }
 
index ca64932..b849f99 100644 (file)
@@ -46,8 +46,6 @@
 extern bool shpchp_poll_mode;
 extern int shpchp_poll_time;
 extern bool shpchp_debug;
-extern struct workqueue_struct *shpchp_wq;
-extern struct workqueue_struct *shpchp_ordered_wq;
 
 #define dbg(format, arg...)                                            \
 do {                                                                   \
@@ -91,6 +89,7 @@ struct slot {
        struct list_head        slot_list;
        struct delayed_work work;       /* work for button event */
        struct mutex lock;
+       struct workqueue_struct *wq;
        u8 hp_slot;
 };
 
index b6de307..3100c52 100644 (file)
@@ -39,8 +39,6 @@
 bool shpchp_debug;
 bool shpchp_poll_mode;
 int shpchp_poll_time;
-struct workqueue_struct *shpchp_wq;
-struct workqueue_struct *shpchp_ordered_wq;
 
 #define DRIVER_VERSION "0.4"
 #define DRIVER_AUTHOR  "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl)
                slot->device = ctrl->slot_device_offset + i;
                slot->hpc_ops = ctrl->hpc_ops;
                slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
+
+               snprintf(name, sizeof(name), "shpchp-%d", slot->number);
+               slot->wq = alloc_workqueue(name, 0, 0);
+               if (!slot->wq) {
+                       retval = -ENOMEM;
+                       goto error_info;
+               }
+
                mutex_init(&slot->lock);
                INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
 
@@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl)
                if (retval) {
                        ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
                                 retval);
-                       goto error_info;
+                       goto error_slotwq;
                }
 
                get_power_status(hotplug_slot, &info->power_status);
@@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl)
        }
 
        return 0;
+error_slotwq:
+       destroy_workqueue(slot->wq);
 error_info:
        kfree(info);
 error_hpslot:
@@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl)
                slot = list_entry(tmp, struct slot, slot_list);
                list_del(&slot->slot_list);
                cancel_delayed_work(&slot->work);
-               flush_workqueue(shpchp_wq);
-               flush_workqueue(shpchp_ordered_wq);
+               destroy_workqueue(slot->wq);
                pci_hp_deregister(slot->hotplug_slot);
        }
 }
@@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = {
 
 static int __init shpcd_init(void)
 {
-       int retval = 0;
-
-       shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
-       if (!shpchp_wq)
-               return -ENOMEM;
-
-       shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
-       if (!shpchp_ordered_wq) {
-               destroy_workqueue(shpchp_wq);
-               return -ENOMEM;
-       }
+       int retval;
 
        retval = pci_register_driver(&shpc_driver);
        dbg("%s: pci_register_driver = %d\n", __func__, retval);
        info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-       if (retval) {
-               destroy_workqueue(shpchp_ordered_wq);
-               destroy_workqueue(shpchp_wq);
-       }
+
        return retval;
 }
 
@@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void)
 {
        dbg("unload_shpchpd()\n");
        pci_unregister_driver(&shpc_driver);
-       destroy_workqueue(shpchp_ordered_wq);
-       destroy_workqueue(shpchp_wq);
        info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
 }
 
index f9b5a52..5849927 100644 (file)
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
        info->p_slot = p_slot;
        INIT_WORK(&info->work, interrupt_event_handler);
 
-       queue_work(shpchp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
 
        return 0;
 }
@@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
                kfree(info);
                goto out;
        }
-       queue_work(shpchp_ordered_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
  out:
        mutex_unlock(&p_slot->lock);
 }
@@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot)
                p_slot->hpc_ops->green_led_blink(p_slot);
                p_slot->hpc_ops->set_attention_status(p_slot, 0);
 
-               queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
+               queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
                break;
        case BLINKINGOFF_STATE:
        case BLINKINGON_STATE:
index bafd2bb..c18e5bf 100644 (file)
@@ -739,7 +739,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
 /**
  * pci_sriov_set_totalvfs -- reduce the TotalVFs available
  * @dev: the PCI PF device
- * numvfs: number that should be used for TotalVFs supported
+ * @numvfs: number that should be used for TotalVFs supported
  *
  * Should be called from PF driver's probe routine with
  * device's mutex held.
index 6c8bc58..fde4a32 100644 (file)
@@ -82,4 +82,4 @@ endchoice
 
 config PCIE_PME
        def_bool y
-       depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI
+       depends on PCIEPORTBUS && PM_RUNTIME && ACPI
index 421bbc5..564d97f 100644 (file)
@@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work)
                        continue;
                }
                do_recovery(pdev, entry.severity);
+               pci_dev_put(pdev);
        }
 }
 #endif
index b52630b..8474b6a 100644 (file)
@@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
 {
        struct pci_dev *child;
 
+       if (aspm_force)
+               return;
+
        /*
         * Clear any ASPM setup that the firmware has carried out on this bus
         */
index 3578e1c..519c4d6 100644 (file)
@@ -133,8 +133,6 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
                goto err_out_free_res;
        }
 
-       pci_set_drvdata(dev, &sockets[i].socket);
-
        for (i = 0; i<socket_count; i++) {
                sockets[i].socket.dev.parent = &dev->dev;
                sockets[i].socket.ops = &i82092aa_operations;
@@ -164,14 +162,14 @@ err_out_disable:
 
 static void i82092aa_pci_remove(struct pci_dev *dev)
 {
-       struct pcmcia_socket *socket = pci_get_drvdata(dev);
+       int i;
 
        enter("i82092aa_pci_remove");
        
        free_irq(dev->irq, i82092aa_interrupt);
 
-       if (socket)
-               pcmcia_unregister_socket(socket);
+       for (i = 0; i < socket_count; i++)
+               pcmcia_unregister_socket(&sockets[i].socket);
 
        leave("i82092aa_pci_remove");
 }
index 75806be..d98a086 100644 (file)
@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
        socket = &vrc4171_sockets[slot];
        socket->csc_irq = search_nonuse_irq();
        socket->io_irq = search_nonuse_irq();
+       spin_lock_init(&socket->lock);
 
        return 0;
 }
index 06f4eb7..afed701 100644 (file)
@@ -125,8 +125,11 @@ static const struct key_entry acer_wmi_keymap[] = {
        {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
        {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
        {KE_IGNORE, 0x81, {KEY_SLEEP} },
-       {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+       {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+       {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+       {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
        {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+       {KE_KEY, 0x85, {KEY_TOUCHPAD_TOGGLE} },
        {KE_END, 0}
 };
 
@@ -147,6 +150,7 @@ struct event_return_value {
 #define ACER_WMID3_GDS_THREEG          (1<<6)  /* 3G */
 #define ACER_WMID3_GDS_WIMAX           (1<<7)  /* WiMAX */
 #define ACER_WMID3_GDS_BLUETOOTH       (1<<11) /* BT */
+#define ACER_WMID3_GDS_TOUCHPAD                (1<<1)  /* Touchpad */
 
 struct lm_input_params {
        u8 function_num;        /* Function Number */
@@ -875,7 +879,7 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
        struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
        struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
-       u32 tmp;
+       u32 tmp = 0;
        acpi_status status;
 
        status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
@@ -884,14 +888,14 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
                return status;
 
        obj = (union acpi_object *) result.pointer;
-       if (obj && obj->type == ACPI_TYPE_BUFFER &&
-               (obj->buffer.length == sizeof(u32) ||
-               obj->buffer.length == sizeof(u64))) {
-               tmp = *((u32 *) obj->buffer.pointer);
-       } else if (obj->type == ACPI_TYPE_INTEGER) {
-               tmp = (u32) obj->integer.value;
-       } else {
-               tmp = 0;
+       if (obj) {
+               if (obj->type == ACPI_TYPE_BUFFER &&
+                       (obj->buffer.length == sizeof(u32) ||
+                       obj->buffer.length == sizeof(u64))) {
+                       tmp = *((u32 *) obj->buffer.pointer);
+               } else if (obj->type == ACPI_TYPE_INTEGER) {
+                       tmp = (u32) obj->integer.value;
+               }
        }
 
        if (out)
@@ -1193,12 +1197,14 @@ static acpi_status WMID_set_capabilities(void)
                return status;
 
        obj = (union acpi_object *) out.pointer;
-       if (obj && obj->type == ACPI_TYPE_BUFFER &&
-               (obj->buffer.length == sizeof(u32) ||
-               obj->buffer.length == sizeof(u64))) {
-               devices = *((u32 *) obj->buffer.pointer);
-       } else if (obj->type == ACPI_TYPE_INTEGER) {
-               devices = (u32) obj->integer.value;
+       if (obj) {
+               if (obj->type == ACPI_TYPE_BUFFER &&
+                       (obj->buffer.length == sizeof(u32) ||
+                       obj->buffer.length == sizeof(u64))) {
+                       devices = *((u32 *) obj->buffer.pointer);
+               } else if (obj->type == ACPI_TYPE_INTEGER) {
+                       devices = (u32) obj->integer.value;
+               }
        } else {
                kfree(out.pointer);
                return AE_ERROR;
@@ -1676,6 +1682,7 @@ static void acer_wmi_notify(u32 value, void *context)
        acpi_status status;
        u16 device_state;
        const struct key_entry *key;
+       u32 scancode;
 
        status = wmi_get_event_data(value, &response);
        if (status != AE_OK) {
@@ -1712,6 +1719,7 @@ static void acer_wmi_notify(u32 value, void *context)
                        pr_warn("Unknown key number - 0x%x\n",
                                return_value.key_num);
                } else {
+                       scancode = return_value.key_num;
                        switch (key->keycode) {
                        case KEY_WLAN:
                        case KEY_BLUETOOTH:
@@ -1725,9 +1733,11 @@ static void acer_wmi_notify(u32 value, void *context)
                                        rfkill_set_sw_state(bluetooth_rfkill,
                                                !(device_state & ACER_WMID3_GDS_BLUETOOTH));
                                break;
+                       case KEY_TOUCHPAD_TOGGLE:
+                               scancode = (device_state & ACER_WMID3_GDS_TOUCHPAD) ?
+                                               KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF;
                        }
-                       sparse_keymap_report_entry(acer_wmi_input_dev, key,
-                                                  1, true);
+                       sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
                }
                break;
        case WMID_ACCEL_EVENT:
@@ -1946,12 +1956,14 @@ static u32 get_wmid_devices(void)
                return 0;
 
        obj = (union acpi_object *) out.pointer;
-       if (obj && obj->type == ACPI_TYPE_BUFFER &&
-               (obj->buffer.length == sizeof(u32) ||
-               obj->buffer.length == sizeof(u64))) {
-               devices = *((u32 *) obj->buffer.pointer);
-       } else if (obj->type == ACPI_TYPE_INTEGER) {
-               devices = (u32) obj->integer.value;
+       if (obj) {
+               if (obj->type == ACPI_TYPE_BUFFER &&
+                       (obj->buffer.length == sizeof(u32) ||
+                       obj->buffer.length == sizeof(u64))) {
+                       devices = *((u32 *) obj->buffer.pointer);
+               } else if (obj->type == ACPI_TYPE_INTEGER) {
+                       devices = (u32) obj->integer.value;
+               }
        }
 
        kfree(out.pointer);
index ec1d3bc..fcde4e5 100644 (file)
@@ -860,8 +860,10 @@ static ssize_t show_infos(struct device *dev,
        /*
         * The HWRS method return informations about the hardware.
         * 0x80 bit is for WLAN, 0x100 for Bluetooth.
+        * 0x40 for WWAN, 0x10 for WIMAX.
         * The significance of others is yet to be found.
-        * If we don't find the method, we assume the device are present.
+        * We don't currently use this for device detection, and it
+        * takes several seconds to run on some systems.
         */
        rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
        if (!ACPI_FAILURE(rv))
@@ -1682,7 +1684,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *model = NULL;
-       unsigned long long bsts_result, hwrs_result;
+       unsigned long long bsts_result;
        char *string = NULL;
        acpi_status status;
 
@@ -1741,20 +1743,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
                return -ENOMEM;
        }
 
-       if (*string)
+       if (string)
                pr_notice("  %s model detected\n", string);
 
-       /*
-        * The HWRS method return informations about the hardware.
-        * 0x80 bit is for WLAN, 0x100 for Bluetooth,
-        * 0x40 for WWAN, 0x10 for WIMAX.
-        * The significance of others is yet to be found.
-        */
-       status =
-           acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
-       if (!ACPI_FAILURE(status))
-               pr_notice("  HWRS returned %x", (int)hwrs_result);
-
        if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
                asus->have_rsts = true;
 
index dd90d15..71623a2 100644 (file)
@@ -1523,6 +1523,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
         .driver_data = &samsung_broken_acpi_video,
        },
+       {
+        .callback = samsung_dmi_matched,
+        .ident = "N250P",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+               DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+               },
+        .driver_data = &samsung_broken_acpi_video,
+       },
        { },
 };
 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
index daaddec..b8ad71f 100644 (file)
@@ -786,28 +786,29 @@ static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
 static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
                void *buffer, size_t buflen)
 {
+       int ret = 0;
        size_t len = len;
        union acpi_object *object = __call_snc_method(handle, name, value);
 
        if (!object)
                return -EINVAL;
 
-       if (object->type == ACPI_TYPE_BUFFER)
+       if (object->type == ACPI_TYPE_BUFFER) {
                len = MIN(buflen, object->buffer.length);
+               memcpy(buffer, object->buffer.pointer, len);
 
-       else if (object->type == ACPI_TYPE_INTEGER)
+       } else if (object->type == ACPI_TYPE_INTEGER) {
                len = MIN(buflen, sizeof(object->integer.value));
+               memcpy(buffer, &object->integer.value, len);
 
-       else {
+       else {
                pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
                                ACPI_TYPE_BUFFER, object->type);
-               kfree(object);
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       memcpy(buffer, object->buffer.pointer, len);
        kfree(object);
-       return 0;
+       return ret;
 }
 
 struct sony_nc_handles {
index 0f65b24..2785843 100644 (file)
@@ -1885,9 +1885,15 @@ int regulator_can_change_voltage(struct regulator *regulator)
        struct regulator_dev    *rdev = regulator->rdev;
 
        if (rdev->constraints &&
-           rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE &&
-           (rdev->desc->n_voltages - rdev->desc->linear_min_sel) > 1)
-               return 1;
+           (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+               if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
+                       return 1;
+
+               if (rdev->desc->continuous_voltage_range &&
+                   rdev->constraints->min_uV && rdev->constraints->max_uV &&
+                   rdev->constraints->min_uV != rdev->constraints->max_uV)
+                       return 1;
+       }
 
        return 0;
 }
@@ -3315,7 +3321,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
  * @config: runtime configuration for regulator
  *
  * Called by regulator drivers to register a regulator.
- * Returns 0 on success.
+ * Returns a valid pointer to struct regulator_dev on success
+ * or an ERR_PTR() on error.
  */
 struct regulator_dev *
 regulator_register(const struct regulator_desc *regulator_desc,
index df0eafb..02be7fc 100644 (file)
@@ -71,26 +71,26 @@ struct voltage_map_desc {
        int step;
 };
 
-/* Voltage maps in mV */
+/* Voltage maps in uV */
 static const struct voltage_map_desc ldo_voltage_map_desc = {
-       .min = 800,     .max = 3950,    .step = 50,
+       .min = 800000,  .max = 3950000, .step = 50000,
 }; /* LDO1 ~ 18, 21 all */
 
 static const struct voltage_map_desc buck1245_voltage_map_desc = {
-       .min = 650,     .max = 2225,    .step = 25,
+       .min = 650000,  .max = 2225000, .step = 25000,
 }; /* Buck1, 2, 4, 5 */
 
 static const struct voltage_map_desc buck37_voltage_map_desc = {
-       .min = 750,     .max = 3900,    .step = 50,
+       .min = 750000,  .max = 3900000, .step = 50000,
 }; /* Buck3, 7 */
 
-/* current map in mA */
+/* current map in uA */
 static const struct voltage_map_desc charger_current_map_desc = {
-       .min = 200,     .max = 950,     .step = 50,
+       .min = 200000,  .max = 950000,  .step = 50000,
 };
 
 static const struct voltage_map_desc topoff_current_map_desc = {
-       .min = 50,      .max = 200,     .step = 10,
+       .min = 50000,   .max = 200000,  .step = 10000,
 };
 
 static const struct voltage_map_desc *reg_voltage_map[] = {
@@ -194,7 +194,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
        if (val > desc->max)
                return -EINVAL;
 
-       return val * 1000;
+       return val;
 }
 
 static int max8997_get_enable_register(struct regulator_dev *rdev,
@@ -485,7 +485,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
 {
        struct max8997_data *max8997 = rdev_get_drvdata(rdev);
        struct i2c_client *i2c = max8997->iodev->i2c;
-       int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
        const struct voltage_map_desc *desc;
        int rid = rdev_get_id(rdev);
        int i, reg, shift, mask, ret;
@@ -509,7 +508,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
 
        desc = reg_voltage_map[rid];
 
-       i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+       i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
        if (i < 0)
                return i;
 
@@ -557,7 +556,7 @@ static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
        case MAX8997_BUCK4:
        case MAX8997_BUCK5:
                return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
-                                   max8997->ramp_delay);
+                                   max8997->ramp_delay * 1000);
        }
 
        return 0;
@@ -656,7 +655,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
        const struct voltage_map_desc *desc;
        int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
        bool gpio_dvs_mode = false;
-       int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
 
        if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
                return -EINVAL;
@@ -681,7 +679,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
                                                selector);
 
        desc = reg_voltage_map[rid];
-       new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+       new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
        if (new_val < 0)
                return new_val;
 
@@ -1123,8 +1121,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                max8997->buck1_vol[i] = ret =
                        max8997_get_voltage_proper_val(
                                        &buck1245_voltage_map_desc,
-                                       pdata->buck1_voltage[i] / 1000,
-                                       pdata->buck1_voltage[i] / 1000 +
+                                       pdata->buck1_voltage[i],
+                                       pdata->buck1_voltage[i] +
                                        buck1245_voltage_map_desc.step);
                if (ret < 0)
                        goto err_out;
@@ -1132,8 +1130,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                max8997->buck2_vol[i] = ret =
                        max8997_get_voltage_proper_val(
                                        &buck1245_voltage_map_desc,
-                                       pdata->buck2_voltage[i] / 1000,
-                                       pdata->buck2_voltage[i] / 1000 +
+                                       pdata->buck2_voltage[i],
+                                       pdata->buck2_voltage[i] +
                                        buck1245_voltage_map_desc.step);
                if (ret < 0)
                        goto err_out;
@@ -1141,8 +1139,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                max8997->buck5_vol[i] = ret =
                        max8997_get_voltage_proper_val(
                                        &buck1245_voltage_map_desc,
-                                       pdata->buck5_voltage[i] / 1000,
-                                       pdata->buck5_voltage[i] / 1000 +
+                                       pdata->buck5_voltage[i],
+                                       pdata->buck5_voltage[i] +
                                        buck1245_voltage_map_desc.step);
                if (ret < 0)
                        goto err_out;
index b821d08..1f0df40 100644 (file)
@@ -51,39 +51,39 @@ struct voltage_map_desc {
        int step;
 };
 
-/* Voltage maps */
+/* Voltage maps in uV*/
 static const struct voltage_map_desc ldo23_voltage_map_desc = {
-       .min = 800,     .step = 50,     .max = 1300,
+       .min = 800000,  .step = 50000,  .max = 1300000,
 };
 static const struct voltage_map_desc ldo456711_voltage_map_desc = {
-       .min = 1600,    .step = 100,    .max = 3600,
+       .min = 1600000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc ldo8_voltage_map_desc = {
-       .min = 3000,    .step = 100,    .max = 3600,
+       .min = 3000000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc ldo9_voltage_map_desc = {
-       .min = 2800,    .step = 100,    .max = 3100,
+       .min = 2800000, .step = 100000, .max = 3100000,
 };
 static const struct voltage_map_desc ldo10_voltage_map_desc = {
-       .min = 950,     .step = 50,     .max = 1300,
+       .min = 95000,   .step = 50000,  .max = 1300000,
 };
 static const struct voltage_map_desc ldo1213_voltage_map_desc = {
-       .min = 800,     .step = 100,    .max = 3300,
+       .min = 800000,  .step = 100000, .max = 3300000,
 };
 static const struct voltage_map_desc ldo1415_voltage_map_desc = {
-       .min = 1200,    .step = 100,    .max = 3300,
+       .min = 1200000, .step = 100000, .max = 3300000,
 };
 static const struct voltage_map_desc ldo1617_voltage_map_desc = {
-       .min = 1600,    .step = 100,    .max = 3600,
+       .min = 1600000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc buck12_voltage_map_desc = {
-       .min = 750,     .step = 25,     .max = 1525,
+       .min = 750000,  .step = 25000,  .max = 1525000,
 };
 static const struct voltage_map_desc buck3_voltage_map_desc = {
-       .min = 1600,    .step = 100,    .max = 3600,
+       .min = 1600000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc buck4_voltage_map_desc = {
-       .min = 800,     .step = 100,    .max = 2300,
+       .min = 800000,  .step = 100000, .max = 2300000,
 };
 
 static const struct voltage_map_desc *ldo_voltage_map[] = {
@@ -445,9 +445,9 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
        if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
                return 0;
 
-       difference = (new_selector - old_selector) * desc->step;
+       difference = (new_selector - old_selector) * desc->step / 1000;
        if (difference > 0)
-               return difference / ((val & 0x0f) + 1);
+               return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
 
        return 0;
 }
@@ -702,7 +702,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage1 / 1000))
+                      < pdata->buck1_voltage1)
                        i++;
                max8998->buck1_vol[0] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
@@ -713,7 +713,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage2 / 1000))
+                      < pdata->buck1_voltage2)
                        i++;
 
                max8998->buck1_vol[1] = i;
@@ -725,7 +725,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage3 / 1000))
+                      < pdata->buck1_voltage3)
                        i++;
 
                max8998->buck1_vol[2] = i;
@@ -737,7 +737,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage4 / 1000))
+                      < pdata->buck1_voltage4)
                        i++;
 
                max8998->buck1_vol[3] = i;
@@ -763,7 +763,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck2_voltage1 / 1000))
+                      < pdata->buck2_voltage1)
                        i++;
                max8998->buck2_vol[0] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
@@ -774,7 +774,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck2_voltage2 / 1000))
+                      < pdata->buck2_voltage2)
                        i++;
                max8998->buck2_vol[1] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
@@ -792,8 +792,8 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                        int count = (desc->max - desc->min) / desc->step + 1;
 
                        regulators[index].n_voltages = count;
-                       regulators[index].min_uV = desc->min * 1000;
-                       regulators[index].uV_step = desc->step * 1000;
+                       regulators[index].min_uV = desc->min;
+                       regulators[index].uV_step = desc->step;
                }
 
                config.dev = max8998->dev;
index 9f991f2..33b65c9 100644 (file)
@@ -214,7 +214,7 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
        struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
        int ret, reg;
        int mask = 0xc0, enable_ctrl;
-       u8 val;
+       unsigned int val;
 
        ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
        if (ret == -EINVAL)
@@ -306,7 +306,7 @@ static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
        struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
        int reg, mask, ret;
        int reg_id = rdev_get_id(rdev);
-       u8 val;
+       unsigned int val;
 
        ret = s5m8767_get_voltage_register(rdev, &reg);
        if (ret)
index 96bafc5..8f0dcfe 100644 (file)
@@ -227,7 +227,7 @@ static const struct rtc_class_ops da9055_rtc_ops = {
        .alarm_irq_enable = da9055_rtc_alarm_irq_enable,
 };
 
-static int __init da9055_rtc_device_init(struct da9055 *da9055,
+static int da9055_rtc_device_init(struct da9055 *da9055,
                                        struct da9055_pdata *pdata)
 {
        int ret;
index 9bd5da3..704488d 100644 (file)
@@ -248,7 +248,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
        default:
                return;
        }
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
+       inc_irq_stat(IRQEXT_DSD);
        if (!ip) {              /* no intparm: unsolicited interrupt */
                DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
                              "interrupt");
index 806fe91..e37bc16 100644 (file)
@@ -4274,7 +4274,7 @@ static struct ccw_driver dasd_eckd_driver = {
        .thaw        = dasd_generic_restore_device,
        .restore     = dasd_generic_restore_device,
        .uc_handler  = dasd_generic_uc_handler,
-       .int_class   = IOINT_DAS,
+       .int_class   = IRQIO_DAS,
 };
 
 /*
index eb74850..4146985 100644 (file)
@@ -78,7 +78,7 @@ static struct ccw_driver dasd_fba_driver = {
        .freeze      = dasd_generic_pm_freeze,
        .thaw        = dasd_generic_restore_device,
        .restore     = dasd_generic_restore_device,
-       .int_class   = IOINT_DAS,
+       .int_class   = IRQIO_DAS,
 };
 
 static void
index 4008450..33b7141 100644 (file)
@@ -44,6 +44,7 @@
 #define RAW3215_NR_CCWS            3
 #define RAW3215_TIMEOUT            HZ/10     /* time for delayed output */
 
+#define RAW3215_FIXED      1         /* 3215 console device is not be freed */
 #define RAW3215_WORKING            4         /* set if a request is being worked on */
 #define RAW3215_THROTTLED   8        /* set if reading is disabled */
 #define RAW3215_STOPPED            16        /* set if writing is disabled */
@@ -630,7 +631,8 @@ static void raw3215_shutdown(struct raw3215_info *raw)
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
 
-       if (!(raw->port.flags & ASYNC_INITIALIZED))
+       if (!(raw->port.flags & ASYNC_INITIALIZED) ||
+           (raw->flags & RAW3215_FIXED))
                return;
        /* Wait for outstanding requests, then free irq */
        spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -805,7 +807,7 @@ static struct ccw_driver raw3215_ccw_driver = {
        .freeze         = &raw3215_pm_stop,
        .thaw           = &raw3215_pm_start,
        .restore        = &raw3215_pm_start,
-       .int_class      = IOINT_C15,
+       .int_class      = IRQIO_C15,
 };
 
 #ifdef CONFIG_TN3215_CONSOLE
@@ -927,6 +929,8 @@ static int __init con3215_init(void)
        dev_set_drvdata(&cdev->dev, raw);
        cdev->handler = raw3215_irq;
 
+       raw->flags |= RAW3215_FIXED;
+
        /* Request the console irq */
        if (raw3215_startup(raw) != 0) {
                raw3215_free_info(raw);
index f3b8bb8..9a6c140 100644 (file)
@@ -1396,7 +1396,7 @@ static struct ccw_driver raw3270_ccw_driver = {
        .freeze         = &raw3270_pm_stop,
        .thaw           = &raw3270_pm_start,
        .restore        = &raw3270_pm_start,
-       .int_class      = IOINT_C70,
+       .int_class      = IRQIO_C70,
 };
 
 static int
index 4fa21f7..12c16a6 100644 (file)
@@ -400,7 +400,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
        u32 finished_sccb;
        u32 evbuf_pending;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+       inc_irq_stat(IRQEXT_SCP);
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
@@ -813,7 +813,7 @@ static void sclp_check_handler(struct ext_code ext_code,
 {
        u32 finished_sccb;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+       inc_irq_stat(IRQEXT_SCP);
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
index 6ae929c..9aa7970 100644 (file)
@@ -1193,7 +1193,7 @@ static struct ccw_driver tape_34xx_driver = {
        .set_online = tape_34xx_online,
        .set_offline = tape_generic_offline,
        .freeze = tape_generic_pm_suspend,
-       .int_class = IOINT_TAP,
+       .int_class = IRQIO_TAP,
 };
 
 static int
index 1b0eb49..327cb19 100644 (file)
@@ -1656,7 +1656,7 @@ static struct ccw_driver tape_3590_driver = {
        .set_offline = tape_generic_offline,
        .set_online = tape_3590_online,
        .freeze = tape_generic_pm_suspend,
-       .int_class = IOINT_TAP,
+       .int_class = IRQIO_TAP,
 };
 
 /*
index 73bef0b..483f72b 100644 (file)
@@ -74,7 +74,7 @@ static struct ccw_driver ur_driver = {
        .set_online     = ur_set_online,
        .set_offline    = ur_set_offline,
        .freeze         = ur_pm_suspend,
-       .int_class      = IOINT_VMR,
+       .int_class      = IRQIO_VMR,
 };
 
 static DEFINE_MUTEX(vmur_mutex);
index 68e80e2..10729bb 100644 (file)
@@ -283,7 +283,7 @@ struct chsc_sei_nt2_area {
        u8  ccdf[PAGE_SIZE - 24 - 56];  /* content-code dependent field */
 } __packed;
 
-#define CHSC_SEI_NT0   0ULL
+#define CHSC_SEI_NT0   (1ULL << 63)
 #define CHSC_SEI_NT2   (1ULL << 61)
 
 struct chsc_sei {
@@ -291,7 +291,8 @@ struct chsc_sei {
        u32 reserved1;
        u64 ntsm;                       /* notification type mask */
        struct chsc_header response;
-       u32 reserved2;
+       u32 :24;
+       u8 nt;
        union {
                struct chsc_sei_nt0_area nt0_area;
                struct chsc_sei_nt2_area nt2_area;
@@ -496,17 +497,17 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
                                css_schedule_eval_all();
                        }
 
-                       switch (sei->ntsm) {
-                       case CHSC_SEI_NT0:
+                       switch (sei->nt) {
+                       case 0:
                                chsc_process_sei_nt0(&sei->u.nt0_area);
-                               return 1;
-                       case CHSC_SEI_NT2:
+                               break;
+                       case 2:
                                chsc_process_sei_nt2(&sei->u.nt2_area);
-                               return 1;
+                               break;
                        default:
-                               CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
-                                             sei->ntsm);
-                               return 0;
+                               CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
+                                             sei->nt);
+                               break;
                        }
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
@@ -537,15 +538,7 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
        sei = sei_page;
 
        CIO_TRACE_EVENT(2, "prcss");
-
-       /*
-        * The ntsm does not allow to select NT0 and NT2 together. We need to
-        * first check for NT2, than additionally for NT0...
-        */
-#ifdef CONFIG_PCI
-       if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
-#endif
-               __chsc_process_crw(sei, CHSC_SEI_NT0);
+       __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
 }
 
 void chsc_chp_online(struct chp_id chpid)
index 8f9a1a3..facdf80 100644 (file)
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
 
        CHSC_LOG(4, "irb");
        CHSC_LOG_HEX(4, irb, sizeof(*irb));
-       kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+       inc_irq_stat(IRQIO_CSC);
 
        /* Copy irb to provided request and set done. */
        if (!request) {
index 8e927b9..c8faf62 100644 (file)
@@ -611,7 +611,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
        irb = (struct irb *)&S390_lowcore.irb;
        do {
-               kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+               kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
                if (tpi_info->adapter_IO) {
                        do_adapter_IO(tpi_info->isc);
                        continue;
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
                sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
                if (!sch) {
                        /* Clear pending interrupt condition. */
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                       inc_irq_stat(IRQIO_CIO);
                        tsch(tpi_info->schid, irb);
                        continue;
                }
@@ -633,9 +633,9 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
                        if (sch->driver && sch->driver->irq)
                                sch->driver->irq(sch);
                        else
-                               kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                               inc_irq_stat(IRQIO_CIO);
                } else
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                       inc_irq_stat(IRQIO_CIO);
                spin_unlock(sch->lock);
                /*
                 * Are more interrupts pending?
@@ -678,7 +678,7 @@ static void cio_tsch(struct subchannel *sch)
        if (sch->driver && sch->driver->irq)
                sch->driver->irq(sch);
        else
-               kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+               inc_irq_stat(IRQIO_CIO);
        if (!irq_context) {
                irq_exit();
                _local_bh_enable();
index 6995cff..7cd5c68 100644 (file)
@@ -758,7 +758,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
                                        struct ccw_device *cdev)
 {
        cdev->private->cdev = cdev;
-       cdev->private->int_class = IOINT_CIO;
+       cdev->private->int_class = IRQIO_CIO;
        atomic_set(&cdev->private->onoff, 0);
        cdev->dev.parent = &sch->dev;
        cdev->dev.release = ccw_device_release;
@@ -1023,7 +1023,7 @@ static void io_subchannel_irq(struct subchannel *sch)
        if (cdev)
                dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
        else
-               kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+               inc_irq_stat(IRQIO_CIO);
 }
 
 void io_subchannel_init_config(struct subchannel *sch)
@@ -1634,7 +1634,7 @@ ccw_device_probe_console(void)
        memset(&console_private, 0, sizeof(struct ccw_device_private));
        console_cdev.private = &console_private;
        console_private.cdev = &console_cdev;
-       console_private.int_class = IOINT_CIO;
+       console_private.int_class = IRQIO_CIO;
        ret = ccw_device_console_enable(&console_cdev, sch);
        if (ret) {
                cio_release_console();
@@ -1715,13 +1715,13 @@ ccw_device_probe (struct device *dev)
        if (cdrv->int_class != 0)
                cdev->private->int_class = cdrv->int_class;
        else
-               cdev->private->int_class = IOINT_CIO;
+               cdev->private->int_class = IRQIO_CIO;
 
        ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
 
        if (ret) {
                cdev->drv = NULL;
-               cdev->private->int_class = IOINT_CIO;
+               cdev->private->int_class = IRQIO_CIO;
                return ret;
        }
 
@@ -1755,7 +1755,7 @@ ccw_device_remove (struct device *dev)
        }
        ccw_device_set_timeout(cdev, 0);
        cdev->drv = NULL;
-       cdev->private->int_class = IOINT_CIO;
+       cdev->private->int_class = IRQIO_CIO;
        return 0;
 }
 
index 2e575cf..7d4ecb6 100644 (file)
@@ -61,11 +61,10 @@ dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
 
        if (dev_event == DEV_EVENT_INTERRUPT) {
                if (state == DEV_STATE_ONLINE)
-                       kstat_cpu(smp_processor_id()).
-                               irqs[cdev->private->int_class]++;
+                       inc_irq_stat(cdev->private->int_class);
                else if (state != DEV_STATE_CMFCHANGE &&
                         state != DEV_STATE_CMFUPDATE)
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                       inc_irq_stat(IRQIO_CIO);
        }
        dev_jumptable[state][dev_event](cdev, dev_event);
 }
index 6c96734..d9eddcb 100644 (file)
@@ -139,7 +139,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
        EADM_LOG(6, "irq");
        EADM_LOG_HEX(6, irb, sizeof(*irb));
 
-       kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
+       inc_irq_stat(IRQIO_ADM);
 
        if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
            && scsw->eswf == 1 && irb->esw.eadm.erw.r)
index bdb394b..bde5255 100644 (file)
@@ -182,7 +182,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
        struct qdio_q *q;
 
        last_ai_time = S390_lowcore.int_clock;
-       kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
+       inc_irq_stat(IRQIO_QAI);
 
        /* protect tiq_list entries, only changed in activate or shutdown */
        rcu_read_lock();
index 7b865a7..b8b340a 100644 (file)
@@ -1272,7 +1272,7 @@ out:
 
 static void ap_interrupt_handler(void *unused1, void *unused2)
 {
-       kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
+       inc_irq_stat(IRQIO_APB);
        tasklet_schedule(&ap_tasklet);
 }
 
index 7dabef6..8491111 100644 (file)
@@ -392,7 +392,7 @@ static void kvm_extint_handler(struct ext_code ext_code,
 
        if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
                return;
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
+       inc_irq_stat(IRQEXT_VRT);
 
        /* The LSB might be overloaded, we have to mask it */
        vq = (struct virtqueue *)(param64 & ~1UL);
index 5c70a65..83bc9c5 100644 (file)
@@ -282,7 +282,7 @@ static struct ccw_driver claw_ccw_driver = {
        .ids    = claw_ids,
        .probe  = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
-       .int_class = IOINT_CLW,
+       .int_class = IRQIO_CLW,
 };
 
 static ssize_t claw_driver_group_store(struct device_driver *ddrv,
index 817b689..676f120 100644 (file)
@@ -1755,7 +1755,7 @@ static struct ccw_driver ctcm_ccw_driver = {
        .ids    = ctcm_ids,
        .probe  = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
-       .int_class = IOINT_CTC,
+       .int_class = IRQIO_CTC,
 };
 
 static struct ccwgroup_driver ctcm_group_driver = {
index 2ca0f1d..c645dc9 100644 (file)
@@ -2384,7 +2384,7 @@ static struct ccw_driver lcs_ccw_driver = {
        .ids    = lcs_ids,
        .probe  = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
-       .int_class = IOINT_LCS,
+       .int_class = IRQIO_LCS,
 };
 
 /**
index 0144078..270b3cf 100644 (file)
@@ -1410,13 +1410,13 @@ enum {
 
 static const struct hv_vmbus_device_id id_table[] = {
        /* SCSI guid */
-       { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
-                      0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
-         .driver_data = SCSI_GUID },
+       { HV_SCSI_GUID,
+         .driver_data = SCSI_GUID
+       },
        /* IDE guid */
-       { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
-                      0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
-         .driver_data = IDE_GUID },
+       { HV_IDE_GUID,
+         .driver_data = IDE_GUID
+       },
        { },
 };
 
index 5aedcdf..1ebe67c 100644 (file)
@@ -126,6 +126,12 @@ static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
 
 static int sh_clk_div_enable(struct clk *clk)
 {
+       if (clk->div_mask == SH_CLK_DIV6_MSK) {
+               int ret = sh_clk_div_set_rate(clk, clk->rate);
+               if (ret < 0)
+                       return ret;
+       }
+
        sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
        return 0;
 }
index 7de2a10..36eec32 100644 (file)
@@ -444,6 +444,7 @@ config COMEDI_ADQ12B
 
 config COMEDI_NI_AT_A2150
        tristate "NI AT-A2150 ISA card support"
+       select COMEDI_FC
        depends on VIRT_TO_BUS
        ---help---
          Enable support for National Instruments AT-A2150 cards
index b7bba17..9b038e4 100644 (file)
@@ -1549,6 +1549,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
        if (cmd == COMEDI_DEVCONFIG) {
                rc = do_devconfig_ioctl(dev,
                                        (struct comedi_devconfig __user *)arg);
+               if (rc == 0)
+                       /* Evade comedi_auto_unconfig(). */
+                       dev_file_info->hardware_device = NULL;
                goto done;
        }
 
index fb3d093..01de996 100644 (file)
@@ -345,7 +345,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
        struct waveform_private *devpriv = dev->private;
 
        devpriv->timer_running = 0;
-       del_timer(&devpriv->timer);
+       del_timer_sync(&devpriv->timer);
        return 0;
 }
 
index aaac0b2..fd1662b 100644 (file)
@@ -963,7 +963,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -982,7 +982,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1001,7 +1001,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1037,7 +1037,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 32,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1056,7 +1056,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 32,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1092,7 +1092,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_628x_ao,
         .reg_type = ni_reg_628x,
         .ao_unipolar = 1,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1111,7 +1111,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_628x_ao,
         .reg_type = ni_reg_628x,
         .ao_unipolar = 1,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1147,7 +1147,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_628x_ao,
         .reg_type = ni_reg_628x,
         .ao_unipolar = 1,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 32,
         .caldac = {caldac_none},
         .has_8255 = 0,
index 580406c..b2f8331 100644 (file)
@@ -3,7 +3,9 @@ config FIREWIRE_SERIAL
        depends on FIREWIRE
        help
           This enables TTY over IEEE 1394, providing high-speed serial
-         connectivity to cabled peers.
+         connectivity to cabled peers. This driver implements a
+         ad-hoc transport protocol and is currently limited to
+         Linux-to-Linux communication.
 
          To compile this driver as a module, say M here:  the module will
          be called firewire-serial.
index 7269005..8dae8fb 100644 (file)
@@ -1,5 +1,5 @@
-TODOs
------
+TODOs prior to this driver moving out of staging
+------------------------------------------------
 1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
    - I/O is handled asynchronously which presents some issues when error
      conditions occur.
@@ -11,17 +11,9 @@ TODOs
 -- Issues with firewire stack --
 1. This driver uses the same unregistered vendor id that the firewire core does
      (0xd00d1e). Perhaps this could be exposed as a define in
-     firewire-constants.h?
-2. MAX_ASYNC_PAYLOAD needs to be publicly exposed by core/ohci
-   - otherwise how will this driver know the max size of address window to
-     open for one packet write?
+     firewire.h?
 3. Maybe device_max_receive() and link_speed_to_max_payload() should be
      taken up by the firewire core?
-4. To avoid dropping rx data while still limiting the maximum buffering,
-     the size of the AR context must be known. How to expose this to drivers?
-5. Explore if bigger AR context will reduce RCODE_BUSY responses
-   (or auto-grow to certain max size -- but this would require major surgery
-    as the current AR is contiguously mapped)
 
 -- Issues with TTY core --
   1. Hack for alternate device name scheme
index 61ee290..d03a7f5 100644 (file)
@@ -179,7 +179,7 @@ static void dump_profile(struct seq_file *m, struct stats *stats)
 /* Returns the max receive packet size for the given card */
 static inline int device_max_receive(struct fw_device *fw_device)
 {
-       return 1 <<  (clamp_t(int, fw_device->max_rec, 8U, 13U) + 1);
+       return 1 <<  (clamp_t(int, fw_device->max_rec, 8U, 11U) + 1);
 }
 
 static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
index 8b572ed..caa1c1e 100644 (file)
@@ -374,10 +374,10 @@ static inline void fwtty_bind_console(struct fwtty_port *port,
  */
 static inline int link_speed_to_max_payload(unsigned speed)
 {
-       static const int max_async[] = { 307, 614, 1229, 2458, 4916, 9832, };
-       BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_3200);
+       static const int max_async[] = { 307, 614, 1229, 2458, };
+       BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_800);
 
-       speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_3200);
+       speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_800);
        if (limit_bw)
                return max_async[speed];
        else
index fb31b45..c5ceb9d 100644 (file)
@@ -239,7 +239,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
        struct mxs_lradc *lradc = iio_priv(iio);
        const uint32_t chan_value = LRADC_CH_ACCUMULATE |
                ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
-       int i, j = 0;
+       unsigned int i, j = 0;
 
        for_each_set_bit(i, iio->active_scan_mask, iio->masklength) {
                lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
index ea295b2..87979a0 100644 (file)
@@ -27,8 +27,8 @@ config ADIS16130
 config ADIS16260
        tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
        depends on SPI
-       select IIO_TRIGGER if IIO_BUFFER
-       select IIO_SW_RING if IIO_BUFFER
+       select IIO_ADIS_LIB
+       select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
        help
          Say yes here to build support for Analog Devices ADIS16260 ADIS16265
          ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
index 3525a68..41d7350 100644 (file)
@@ -69,7 +69,7 @@ static int adis16080_spi_read(struct iio_dev *indio_dev,
        ret = spi_read(st->us, st->buf, 2);
 
        if (ret == 0)
-               *val = ((st->buf[0] & 0xF) << 8) | st->buf[1];
+               *val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11);
        mutex_unlock(&st->buf_lock);
 
        return ret;
index ecf0f44..cec19f1 100644 (file)
@@ -584,7 +584,6 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
 
        ret = imx_drm_encoder_register(imx_drm_encoder);
        if (ret) {
-               kfree(imx_drm_encoder);
                ret = -ENOMEM;
                goto err_register;
        }
index 677e665..f7059cd 100644 (file)
@@ -1104,7 +1104,9 @@ static int ipu_probe(struct platform_device *pdev)
        if (ret)
                goto out_failed_irq;
 
-       ipu_reset(ipu);
+       ret = ipu_reset(ipu);
+       if (ret)
+               goto out_failed_reset;
 
        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
@@ -1129,6 +1131,7 @@ failed_add_clients:
        ipu_submodules_exit(ipu);
 failed_submodules_init:
        ipu_irq_exit(ipu);
+out_failed_reset:
 out_failed_irq:
        clk_disable_unprepare(ipu->clk);
 failed_clk_get:
index 1892006..4b3a019 100644 (file)
@@ -452,7 +452,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
        int ret;
 
        ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]);
-       if (IS_ERR_OR_NULL(ipu_crtc->ipu_ch)) {
+       if (IS_ERR(ipu_crtc->ipu_ch)) {
                ret = PTR_ERR(ipu_crtc->ipu_ch);
                goto err_out;
        }
@@ -472,7 +472,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
        if (pdata->dp >= 0) {
                ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp);
                if (IS_ERR(ipu_crtc->dp)) {
-                       ret = PTR_ERR(ipu_crtc->ipu_ch);
+                       ret = PTR_ERR(ipu_crtc->dp);
                        goto err_out;
                }
        }
@@ -548,6 +548,8 @@ static int ipu_drm_probe(struct platform_device *pdev)
        ipu_crtc->dev = &pdev->dev;
 
        ret = ipu_crtc_init(ipu_crtc, pdata);
+       if (ret)
+               return ret;
 
        platform_set_drvdata(pdev, ipu_crtc);
 
index 1ca0e00..d85e058 100644 (file)
@@ -5,6 +5,7 @@
 
 ccflags-y := -Iinclude/drm -Werror
 omapdrm-y := omap_drv.o \
+       omap_irq.o \
        omap_debugfs.o \
        omap_crtc.o \
        omap_plane.o \
index 938c788..abeeb00 100644 (file)
@@ -17,9 +17,6 @@ TODO
 . Revisit GEM sync object infrastructure.. TTM has some framework for this
   already.  Possibly this could be refactored out and made more common?
   There should be some way to do this with less wheel-reinvention.
-. Review DSS vs KMS mismatches.  The omap_dss_device is sort of part encoder,
-  part connector.  Which results in a bit of duct tape to fwd calls from
-  encoder to connector.  Possibly this could be done a bit better.
 . Solve PM sequencing on resume.  DMM/TILER must be reloaded before any
   access is made from any component in the system.  Which means on suspend
   CRTC's should be disabled, and on resume the LUT should be reprogrammed
index 91edb3f..4cc9ee7 100644 (file)
 struct omap_connector {
        struct drm_connector base;
        struct omap_dss_device *dssdev;
+       struct drm_encoder *encoder;
 };
 
-static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
                struct omap_video_timings *timings)
 {
        mode->clock = timings->pixel_clock;
@@ -64,7 +65,7 @@ static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
                mode->flags |= DRM_MODE_FLAG_NVSYNC;
 }
 
-static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
                struct drm_display_mode *mode)
 {
        timings->pixel_clock = mode->clock;
@@ -96,48 +97,7 @@ static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
        timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
 }
 
-static void omap_connector_dpms(struct drm_connector *connector, int mode)
-{
-       struct omap_connector *omap_connector = to_omap_connector(connector);
-       struct omap_dss_device *dssdev = omap_connector->dssdev;
-       int old_dpms;
-
-       DBG("%s: %d", dssdev->name, mode);
-
-       old_dpms = connector->dpms;
-
-       /* from off to on, do from crtc to connector */
-       if (mode < old_dpms)
-               drm_helper_connector_dpms(connector, mode);
-
-       if (mode == DRM_MODE_DPMS_ON) {
-               /* store resume info for suspended displays */
-               switch (dssdev->state) {
-               case OMAP_DSS_DISPLAY_SUSPENDED:
-                       dssdev->activate_after_resume = true;
-                       break;
-               case OMAP_DSS_DISPLAY_DISABLED: {
-                       int ret = dssdev->driver->enable(dssdev);
-                       if (ret) {
-                               DBG("%s: failed to enable: %d",
-                                               dssdev->name, ret);
-                               dssdev->driver->disable(dssdev);
-                       }
-                       break;
-               }
-               default:
-                       break;
-               }
-       } else {
-               /* TODO */
-       }
-
-       /* from on to off, do from connector to crtc */
-       if (mode > old_dpms)
-               drm_helper_connector_dpms(connector, mode);
-}
-
-enum drm_connector_status omap_connector_detect(
+static enum drm_connector_status omap_connector_detect(
                struct drm_connector *connector, bool force)
 {
        struct omap_connector *omap_connector = to_omap_connector(connector);
@@ -164,8 +124,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
        struct omap_connector *omap_connector = to_omap_connector(connector);
        struct omap_dss_device *dssdev = omap_connector->dssdev;
 
-       dssdev->driver->disable(dssdev);
-
        DBG("%s", omap_connector->dssdev->name);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
@@ -261,36 +219,12 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
 struct drm_encoder *omap_connector_attached_encoder(
                struct drm_connector *connector)
 {
-       int i;
        struct omap_connector *omap_connector = to_omap_connector(connector);
-
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct drm_mode_object *obj;
-
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               obj = drm_mode_object_find(connector->dev,
-                               connector->encoder_ids[i],
-                               DRM_MODE_OBJECT_ENCODER);
-
-               if (obj) {
-                       struct drm_encoder *encoder = obj_to_encoder(obj);
-                       struct omap_overlay_manager *mgr =
-                                       omap_encoder_get_manager(encoder);
-                       DBG("%s: found %s", omap_connector->dssdev->name,
-                                       mgr->name);
-                       return encoder;
-               }
-       }
-
-       DBG("%s: no encoder", omap_connector->dssdev->name);
-
-       return NULL;
+       return omap_connector->encoder;
 }
 
 static const struct drm_connector_funcs omap_connector_funcs = {
-       .dpms = omap_connector_dpms,
+       .dpms = drm_helper_connector_dpms,
        .detect = omap_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = omap_connector_destroy,
@@ -302,34 +236,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
        .best_encoder = omap_connector_attached_encoder,
 };
 
-/* called from encoder when mode is set, to propagate settings to the dssdev */
-void omap_connector_mode_set(struct drm_connector *connector,
-               struct drm_display_mode *mode)
-{
-       struct drm_device *dev = connector->dev;
-       struct omap_connector *omap_connector = to_omap_connector(connector);
-       struct omap_dss_device *dssdev = omap_connector->dssdev;
-       struct omap_dss_driver *dssdrv = dssdev->driver;
-       struct omap_video_timings timings = {0};
-
-       copy_timings_drm_to_omap(&timings, mode);
-
-       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       omap_connector->dssdev->name,
-                       mode->base.id, mode->name, mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-
-       if (dssdrv->check_timings(dssdev, &timings)) {
-               dev_err(dev->dev, "could not set timings\n");
-               return;
-       }
-
-       dssdrv->set_timings(dssdev, &timings);
-}
-
 /* flush an area of the framebuffer (in case of manual update display that
  * is not automatically flushed)
  */
@@ -344,7 +250,8 @@ void omap_connector_flush(struct drm_connector *connector,
 
 /* initialize connector */
 struct drm_connector *omap_connector_init(struct drm_device *dev,
-               int connector_type, struct omap_dss_device *dssdev)
+               int connector_type, struct omap_dss_device *dssdev,
+               struct drm_encoder *encoder)
 {
        struct drm_connector *connector = NULL;
        struct omap_connector *omap_connector;
@@ -360,6 +267,8 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
        }
 
        omap_connector->dssdev = dssdev;
+       omap_connector->encoder = encoder;
+
        connector = &omap_connector->base;
 
        drm_connector_init(dev, connector, &omap_connector_funcs,
index d87bd84..5c6ed60 100644 (file)
 struct omap_crtc {
        struct drm_crtc base;
        struct drm_plane *plane;
+
        const char *name;
-       int id;
+       int pipe;
+       enum omap_channel channel;
+       struct omap_overlay_manager_info info;
+
+       /*
+        * Temporary: eventually this will go away, but it is needed
+        * for now to keep the output's happy.  (They only need
+        * mgr->id.)  Eventually this will be replaced w/ something
+        * more common-panel-framework-y
+        */
+       struct omap_overlay_manager mgr;
+
+       struct omap_video_timings timings;
+       bool enabled;
+       bool full_update;
+
+       struct omap_drm_apply apply;
+
+       struct omap_drm_irq apply_irq;
+       struct omap_drm_irq error_irq;
+
+       /* list of in-progress apply's: */
+       struct list_head pending_applies;
+
+       /* list of queued apply's: */
+       struct list_head queued_applies;
+
+       /* for handling queued and in-progress applies: */
+       struct work_struct apply_work;
 
        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;
        struct drm_framebuffer *old_fb;
+
+       /* for handling page flips without caring about what
+        * the callback is called from.  Possibly we should just
+        * make omap_gem always call the cb from the worker so
+        * we don't have to care about this..
+        *
+        * XXX maybe fold into apply_work??
+        */
+       struct work_struct page_flip_work;
+};
+
+/*
+ * Manager-ops, callbacks from output when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+       return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings)
+{
+       struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+       DBG("%s", omap_crtc->name);
+       omap_crtc->timings = *timings;
+       omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+               const struct dss_lcd_mgr_config *config)
+{
+       struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+       DBG("%s", omap_crtc->name);
+       dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+               struct omap_overlay_manager *mgr,
+               void (*handler)(void *), void *data)
+{
+       return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+               struct omap_overlay_manager *mgr,
+               void (*handler)(void *), void *data)
+{
+}
+
+static const struct dss_mgr_ops mgr_ops = {
+               .start_update = omap_crtc_start_update,
+               .enable = omap_crtc_enable,
+               .disable = omap_crtc_disable,
+               .set_timings = omap_crtc_set_timings,
+               .set_lcd_config = omap_crtc_set_lcd_config,
+               .register_framedone_handler = omap_crtc_register_framedone_handler,
+               .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
 };
 
+/*
+ * CRTC funcs:
+ */
+
 static void omap_crtc_destroy(struct drm_crtc *crtc)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       DBG("%s", omap_crtc->name);
+
+       WARN_ON(omap_crtc->apply_irq.registered);
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
        omap_crtc->plane->funcs->destroy(omap_crtc->plane);
        drm_crtc_cleanup(crtc);
+
        kfree(omap_crtc);
 }
 
@@ -48,14 +160,25 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
        struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
        int i;
 
-       WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+       DBG("%s: %d", omap_crtc->name, mode);
+
+       if (enabled != omap_crtc->enabled) {
+               omap_crtc->enabled = enabled;
+               omap_crtc->full_update = true;
+               omap_crtc_apply(crtc, &omap_crtc->apply);
 
-       for (i = 0; i < priv->num_planes; i++) {
-               struct drm_plane *plane = priv->planes[i];
-               if (plane->crtc == crtc)
-                       WARN_ON(omap_plane_dpms(plane, mode));
+               /* also enable our private plane: */
+               WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+               /* and any attached overlay planes: */
+               for (i = 0; i < priv->num_planes; i++) {
+                       struct drm_plane *plane = priv->planes[i];
+                       if (plane->crtc == crtc)
+                               WARN_ON(omap_plane_dpms(plane, mode));
+               }
        }
 }
 
@@ -73,12 +196,26 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
                struct drm_framebuffer *old_fb)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-       struct drm_plane *plane = omap_crtc->plane;
 
-       return omap_plane_mode_set(plane, crtc, crtc->fb,
+       mode = adjusted_mode;
+
+       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       omap_crtc->name, mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+
+       copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+       omap_crtc->full_update = true;
+
+       return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
                        0, 0, mode->hdisplay, mode->vdisplay,
                        x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       NULL, NULL);
 }
 
 static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -102,10 +239,11 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_plane *plane = omap_crtc->plane;
        struct drm_display_mode *mode = &crtc->mode;
 
-       return plane->funcs->update_plane(plane, crtc, crtc->fb,
+       return omap_plane_mode_set(plane, crtc, crtc->fb,
                        0, 0, mode->hdisplay, mode->vdisplay,
                        x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       NULL, NULL);
 }
 
 static void omap_crtc_load_lut(struct drm_crtc *crtc)
@@ -114,63 +252,54 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
 
 static void vblank_cb(void *arg)
 {
-       static uint32_t sequence;
        struct drm_crtc *crtc = arg;
        struct drm_device *dev = crtc->dev;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-       struct drm_pending_vblank_event *event = omap_crtc->event;
        unsigned long flags;
-       struct timeval now;
 
-       WARN_ON(!event);
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* wakeup userspace */
+       if (omap_crtc->event)
+               drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
 
        omap_crtc->event = NULL;
+       omap_crtc->old_fb = NULL;
 
-       /* wakeup userspace */
-       if (event) {
-               do_gettimeofday(&now);
-
-               spin_lock_irqsave(&dev->event_lock, flags);
-               /* TODO: we can't yet use the vblank time accounting,
-                * because omapdss lower layer is the one that knows
-                * the irq # and registers the handler, which more or
-                * less defeats how drm_irq works.. for now just fake
-                * the sequence number and use gettimeofday..
-                *
-               event->event.sequence = drm_vblank_count_and_time(
-                               dev, omap_crtc->id, &now);
-                */
-               event->event.sequence = sequence++;
-               event->event.tv_sec = now.tv_sec;
-               event->event.tv_usec = now.tv_usec;
-               list_add_tail(&event->base.link,
-                               &event->base.file_priv->event_list);
-               wake_up_interruptible(&event->base.file_priv->event_wait);
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void page_flip_cb(void *arg)
+static void page_flip_worker(struct work_struct *work)
 {
-       struct drm_crtc *crtc = arg;
-       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-       struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+       struct omap_crtc *omap_crtc =
+                       container_of(work, struct omap_crtc, page_flip_work);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_display_mode *mode = &crtc->mode;
        struct drm_gem_object *bo;
 
-       omap_crtc->old_fb = NULL;
-
-       omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
-
-       /* really we'd like to setup the callback atomically w/ setting the
-        * new scanout buffer to avoid getting stuck waiting an extra vblank
-        * cycle.. for now go for correctness and later figure out speed..
-        */
-       omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
+       mutex_lock(&dev->mode_config.mutex);
+       omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       crtc->x << 16, crtc->y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       vblank_cb, crtc);
+       mutex_unlock(&dev->mode_config.mutex);
 
        bo = omap_framebuffer_bo(crtc->fb, 0);
        drm_gem_object_unreference_unlocked(bo);
 }
 
+static void page_flip_cb(void *arg)
+{
+       struct drm_crtc *crtc = arg;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct omap_drm_private *priv = crtc->dev->dev_private;
+
+       /* avoid assumptions about what ctxt we are called from: */
+       queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
 static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
                 struct drm_framebuffer *fb,
                 struct drm_pending_vblank_event *event)
@@ -179,14 +308,14 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
        struct drm_gem_object *bo;
 
-       DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+       DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+                       fb->base.id, event);
 
-       if (omap_crtc->event) {
+       if (omap_crtc->old_fb) {
                dev_err(dev->dev, "already a pending flip\n");
                return -EINVAL;
        }
 
-       omap_crtc->old_fb = crtc->fb;
        omap_crtc->event = event;
        crtc->fb = fb;
 
@@ -234,14 +363,244 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
        .load_lut = omap_crtc_load_lut,
 };
 
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(irq, struct omap_crtc, error_irq);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+       /* avoid getting in a flood, unregister the irq until next vblank */
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(irq, struct omap_crtc, apply_irq);
+       struct drm_crtc *crtc = &omap_crtc->base;
+
+       if (!omap_crtc->error_irq.registered)
+               omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+       if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+               struct omap_drm_private *priv =
+                               crtc->dev->dev_private;
+               DBG("%s: apply done", omap_crtc->name);
+               omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+               queue_work(priv->wq, &omap_crtc->apply_work);
+       }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(work, struct omap_crtc, apply_work);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct omap_drm_apply *apply, *n;
+       bool need_apply;
+
+       /*
+        * Synchronize everything on mode_config.mutex, to keep
+        * the callbacks and list modification all serialized
+        * with respect to modesetting ioctls from userspace.
+        */
+       mutex_lock(&dev->mode_config.mutex);
+       dispc_runtime_get();
+
+       /*
+        * If we are still pending a previous update, wait.. when the
+        * pending update completes, we get kicked again.
+        */
+       if (omap_crtc->apply_irq.registered)
+               goto out;
+
+       /* finish up previous apply's: */
+       list_for_each_entry_safe(apply, n,
+                       &omap_crtc->pending_applies, pending_node) {
+               apply->post_apply(apply);
+               list_del(&apply->pending_node);
+       }
+
+       need_apply = !list_empty(&omap_crtc->queued_applies);
+
+       /* then handle the next round of of queued apply's: */
+       list_for_each_entry_safe(apply, n,
+                       &omap_crtc->queued_applies, queued_node) {
+               apply->pre_apply(apply);
+               list_del(&apply->queued_node);
+               apply->queued = false;
+               list_add_tail(&apply->pending_node,
+                               &omap_crtc->pending_applies);
+       }
+
+       if (need_apply) {
+               enum omap_channel channel = omap_crtc->channel;
+
+               DBG("%s: GO", omap_crtc->name);
+
+               if (dispc_mgr_is_enabled(channel)) {
+                       omap_irq_register(dev, &omap_crtc->apply_irq);
+                       dispc_mgr_go(channel);
+               } else {
+                       struct omap_drm_private *priv = dev->dev_private;
+                       queue_work(priv->wq, &omap_crtc->apply_work);
+               }
+       }
+
+out:
+       dispc_runtime_put();
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+               struct omap_drm_apply *apply)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+       /* no need to queue it again if it is already queued: */
+       if (apply->queued)
+               return 0;
+
+       apply->queued = true;
+       list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+       /*
+        * If there are no currently pending updates, then go ahead and
+        * kick the worker immediately, otherwise it will run again when
+        * the current update finishes.
+        */
+       if (list_empty(&omap_crtc->pending_applies)) {
+               struct omap_drm_private *priv = crtc->dev->dev_private;
+               queue_work(priv->wq, &omap_crtc->apply_work);
+       }
+
+       return 0;
+}
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       enum omap_channel channel = omap_crtc->channel;
+       struct omap_irq_wait *wait = NULL;
+
+       if (dispc_mgr_is_enabled(channel) == enable)
+               return;
+
+       /* ignore sync-lost irqs during enable/disable */
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+       if (dispc_mgr_get_framedone_irq(channel)) {
+               if (!enable) {
+                       wait = omap_irq_wait_init(dev,
+                                       dispc_mgr_get_framedone_irq(channel), 1);
+               }
+       } else {
+               /*
+                * When we disable digit output, we need to wait until fields
+                * are done.  Otherwise the DSS is still working, and turning
+                * off the clocks prevents DSS from going to OFF mode. And when
+                * enabling, we need to wait for the extra sync losts
+                */
+               wait = omap_irq_wait_init(dev,
+                               dispc_mgr_get_vsync_irq(channel), 2);
+       }
+
+       dispc_mgr_enable(channel, enable);
+
+       if (wait) {
+               int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+               if (ret) {
+                       dev_err(dev->dev, "%s: timeout waiting for %s\n",
+                                       omap_crtc->name, enable ? "enable" : "disable");
+               }
+       }
+
+       omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(apply, struct omap_crtc, apply);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_encoder *encoder = NULL;
+
+       DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+                       omap_crtc->enabled, omap_crtc->full_update);
+
+       if (omap_crtc->full_update) {
+               struct omap_drm_private *priv = crtc->dev->dev_private;
+               int i;
+               for (i = 0; i < priv->num_encoders; i++) {
+                       if (priv->encoders[i]->crtc == crtc) {
+                               encoder = priv->encoders[i];
+                               break;
+                       }
+               }
+       }
+
+       if (!omap_crtc->enabled) {
+               set_enabled(&omap_crtc->base, false);
+               if (encoder)
+                       omap_encoder_set_enabled(encoder, false);
+       } else {
+               if (encoder) {
+                       omap_encoder_set_enabled(encoder, false);
+                       omap_encoder_update(encoder, &omap_crtc->mgr,
+                                       &omap_crtc->timings);
+                       omap_encoder_set_enabled(encoder, true);
+                       omap_crtc->full_update = false;
+               }
+
+               dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+               dispc_mgr_set_timings(omap_crtc->channel,
+                               &omap_crtc->timings);
+               set_enabled(&omap_crtc->base, true);
+       }
+
+       omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+       /* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+               [OMAP_DSS_CHANNEL_LCD] = "lcd",
+               [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+               [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
 /* initialize crtc */
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
-               struct omap_overlay *ovl, int id)
+               struct drm_plane *plane, enum omap_channel channel, int id)
 {
        struct drm_crtc *crtc = NULL;
-       struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+       struct omap_crtc *omap_crtc;
+       struct omap_overlay_manager_info *info;
+
+       DBG("%s", channel_names[channel]);
 
-       DBG("%s", ovl->name);
+       omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
 
        if (!omap_crtc) {
                dev_err(dev->dev, "could not allocate CRTC\n");
@@ -250,10 +609,40 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 
        crtc = &omap_crtc->base;
 
-       omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
+       INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+       INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+       INIT_LIST_HEAD(&omap_crtc->pending_applies);
+       INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+       omap_crtc->apply.pre_apply  = omap_crtc_pre_apply;
+       omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+       omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+       omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+       omap_crtc->error_irq.irqmask =
+                       dispc_mgr_get_sync_lost_irq(channel);
+       omap_crtc->error_irq.irq = omap_crtc_error_irq;
+       omap_irq_register(dev, &omap_crtc->error_irq);
+
+       omap_crtc->channel = channel;
+       omap_crtc->plane = plane;
        omap_crtc->plane->crtc = crtc;
-       omap_crtc->name = ovl->name;
-       omap_crtc->id = id;
+       omap_crtc->name = channel_names[channel];
+       omap_crtc->pipe = id;
+
+       /* temporary: */
+       omap_crtc->mgr.id = channel;
+
+       dss_install_mgr_ops(&mgr_ops);
+
+       /* TODO: fix hard-coded setup.. add properties! */
+       info = &omap_crtc->info;
+       info->default_color = 0x00000000;
+       info->trans_key = 0x00000000;
+       info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+       info->trans_enabled = false;
 
        drm_crtc_init(dev, crtc, &omap_crtc_funcs);
        drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
index 84943e5..ae5ecc2 100644 (file)
@@ -74,320 +74,99 @@ static int get_connector_type(struct omap_dss_device *dssdev)
        }
 }
 
-#if 0 /* enable when dss2 supports hotplug */
-static int omap_drm_notifier(struct notifier_block *nb,
-               unsigned long evt, void *arg)
-{
-       switch (evt) {
-       case OMAP_DSS_SIZE_CHANGE:
-       case OMAP_DSS_HOTPLUG_CONNECT:
-       case OMAP_DSS_HOTPLUG_DISCONNECT: {
-               struct drm_device *dev = drm_device;
-               DBG("hotplug event: evt=%d, dev=%p", evt, dev);
-               if (dev)
-                       drm_sysfs_hotplug_event(dev);
-
-               return NOTIFY_OK;
-       }
-       default:  /* don't care about other events for now */
-               return NOTIFY_DONE;
-       }
-}
-#endif
-
-static void dump_video_chains(void)
-{
-       int i;
-
-       DBG("dumping video chains: ");
-       for (i = 0; i < omap_dss_get_num_overlays(); i++) {
-               struct omap_overlay *ovl = omap_dss_get_overlay(i);
-               struct omap_overlay_manager *mgr = ovl->manager;
-               struct omap_dss_device *dssdev = mgr ?
-                                       mgr->get_device(mgr) : NULL;
-               if (dssdev) {
-                       DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
-                                               dssdev->name);
-               } else if (mgr) {
-                       DBG("%d: %s -> %s", i, ovl->name, mgr->name);
-               } else {
-                       DBG("%d: %s", i, ovl->name);
-               }
-       }
-}
-
-/* create encoders for each manager */
-static int create_encoder(struct drm_device *dev,
-               struct omap_overlay_manager *mgr)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
-
-       if (!encoder) {
-               dev_err(dev->dev, "could not create encoder: %s\n",
-                               mgr->name);
-               return -ENOMEM;
-       }
-
-       BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
-
-       priv->encoders[priv->num_encoders++] = encoder;
-
-       return 0;
-}
-
-/* create connectors for each display device */
-static int create_connector(struct drm_device *dev,
-               struct omap_dss_device *dssdev)
+static int omap_modeset_init(struct drm_device *dev)
 {
        struct omap_drm_private *priv = dev->dev_private;
-       static struct notifier_block *notifier;
-       struct drm_connector *connector;
-       int j;
-
-       if (!dssdev->driver) {
-               dev_warn(dev->dev, "%s has no driver.. skipping it\n",
-                               dssdev->name);
-               return 0;
-       }
+       struct omap_dss_device *dssdev = NULL;
+       int num_ovls = dss_feat_get_num_ovls();
+       int id;
 
-       if (!(dssdev->driver->get_timings ||
-                               dssdev->driver->read_edid)) {
-               dev_warn(dev->dev, "%s driver does not support "
-                       "get_timings or read_edid.. skipping it!\n",
-                       dssdev->name);
-               return 0;
-       }
+       drm_mode_config_init(dev);
 
-       connector = omap_connector_init(dev,
-                       get_connector_type(dssdev), dssdev);
+       omap_drm_irq_install(dev);
 
-       if (!connector) {
-               dev_err(dev->dev, "could not create connector: %s\n",
-                               dssdev->name);
-               return -ENOMEM;
-       }
-
-       BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+       /*
+        * Create private planes and CRTCs for the last NUM_CRTCs overlay
+        * plus manager:
+        */
+       for (id = 0; id < min(num_crtc, num_ovls); id++) {
+               struct drm_plane *plane;
+               struct drm_crtc *crtc;
 
-       priv->connectors[priv->num_connectors++] = connector;
+               plane = omap_plane_init(dev, id, true);
+               crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
 
-#if 0 /* enable when dss2 supports hotplug */
-       notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
-       notifier->notifier_call = omap_drm_notifier;
-       omap_dss_add_notify(dssdev, notifier);
-#else
-       notifier = NULL;
-#endif
+               BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+               priv->crtcs[id] = crtc;
+               priv->num_crtcs++;
 
-       for (j = 0; j < priv->num_encoders; j++) {
-               struct omap_overlay_manager *mgr =
-                       omap_encoder_get_manager(priv->encoders[j]);
-               if (mgr->get_device(mgr) == dssdev) {
-                       drm_mode_connector_attach_encoder(connector,
-                                       priv->encoders[j]);
-               }
+               priv->planes[id] = plane;
+               priv->num_planes++;
        }
 
-       return 0;
-}
-
-/* create up to max_overlays CRTCs mapping to overlays.. by default,
- * connect the overlays to different managers/encoders, giving priority
- * to encoders connected to connectors with a detected connection
- */
-static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
-               int *j, unsigned int connected_connectors)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct omap_overlay_manager *mgr = NULL;
-       struct drm_crtc *crtc;
-
-       /* find next best connector, ones with detected connection first
+       /*
+        * Create normal planes for the remaining overlays:
         */
-       while (*j < priv->num_connectors && !mgr) {
-               if (connected_connectors & (1 << *j)) {
-                       struct drm_encoder *encoder =
-                               omap_connector_attached_encoder(
-                                               priv->connectors[*j]);
-                       if (encoder)
-                               mgr = omap_encoder_get_manager(encoder);
+       for (; id < num_ovls; id++) {
+               struct drm_plane *plane = omap_plane_init(dev, id, false);
 
-               }
-               (*j)++;
+               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+               priv->planes[priv->num_planes++] = plane;
        }
 
-       /* if we couldn't find another connected connector, lets start
-        * looking at the unconnected connectors:
-        *
-        * note: it might not be immediately apparent, but thanks to
-        * the !mgr check in both this loop and the one above, the only
-        * way to enter this loop is with *j == priv->num_connectors,
-        * so idx can never go negative.
-        */
-       while (*j < 2 * priv->num_connectors && !mgr) {
-               int idx = *j - priv->num_connectors;
-               if (!(connected_connectors & (1 << idx))) {
-                       struct drm_encoder *encoder =
-                               omap_connector_attached_encoder(
-                                               priv->connectors[idx]);
-                       if (encoder)
-                               mgr = omap_encoder_get_manager(encoder);
+       for_each_dss_dev(dssdev) {
+               struct drm_connector *connector;
+               struct drm_encoder *encoder;
 
+               if (!dssdev->driver) {
+                       dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+                                       dssdev->name);
+                       return 0;
                }
-               (*j)++;
-       }
-
-       crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
-
-       if (!crtc) {
-               dev_err(dev->dev, "could not create CRTC: %s\n",
-                               ovl->name);
-               return -ENOMEM;
-       }
 
-       BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-
-       priv->crtcs[priv->num_crtcs++] = crtc;
-
-       return 0;
-}
-
-static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
-               unsigned int possible_crtcs)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct drm_plane *plane =
-                       omap_plane_init(dev, ovl, possible_crtcs, false);
-
-       if (!plane) {
-               dev_err(dev->dev, "could not create plane: %s\n",
-                               ovl->name);
-               return -ENOMEM;
-       }
-
-       BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-
-       priv->planes[priv->num_planes++] = plane;
-
-       return 0;
-}
-
-static int match_dev_name(struct omap_dss_device *dssdev, void *data)
-{
-       return !strcmp(dssdev->name, data);
-}
-
-static unsigned int detect_connectors(struct drm_device *dev)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       unsigned int connected_connectors = 0;
-       int i;
-
-       for (i = 0; i < priv->num_connectors; i++) {
-               struct drm_connector *connector = priv->connectors[i];
-               if (omap_connector_detect(connector, true) ==
-                               connector_status_connected) {
-                       connected_connectors |= (1 << i);
+               if (!(dssdev->driver->get_timings ||
+                                       dssdev->driver->read_edid)) {
+                       dev_warn(dev->dev, "%s driver does not support "
+                               "get_timings or read_edid.. skipping it!\n",
+                               dssdev->name);
+                       return 0;
                }
-       }
-
-       return connected_connectors;
-}
 
-static int omap_modeset_init(struct drm_device *dev)
-{
-       const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
-       struct omap_kms_platform_data *kms_pdata = NULL;
-       struct omap_drm_private *priv = dev->dev_private;
-       struct omap_dss_device *dssdev = NULL;
-       int i, j;
-       unsigned int connected_connectors = 0;
+               encoder = omap_encoder_init(dev, dssdev);
 
-       drm_mode_config_init(dev);
-
-       if (pdata && pdata->kms_pdata) {
-               kms_pdata = pdata->kms_pdata;
-
-               /* if platform data is provided by the board file, use it to
-                * control which overlays, managers, and devices we own.
-                */
-               for (i = 0; i < kms_pdata->mgr_cnt; i++) {
-                       struct omap_overlay_manager *mgr =
-                               omap_dss_get_overlay_manager(
-                                               kms_pdata->mgr_ids[i]);
-                       create_encoder(dev, mgr);
-               }
-
-               for (i = 0; i < kms_pdata->dev_cnt; i++) {
-                       struct omap_dss_device *dssdev =
-                               omap_dss_find_device(
-                                       (void *)kms_pdata->dev_names[i],
-                                       match_dev_name);
-                       if (!dssdev) {
-                               dev_warn(dev->dev, "no such dssdev: %s\n",
-                                               kms_pdata->dev_names[i]);
-                               continue;
-                       }
-                       create_connector(dev, dssdev);
+               if (!encoder) {
+                       dev_err(dev->dev, "could not create encoder: %s\n",
+                                       dssdev->name);
+                       return -ENOMEM;
                }
 
-               connected_connectors = detect_connectors(dev);
+               connector = omap_connector_init(dev,
+                               get_connector_type(dssdev), dssdev, encoder);
 
-               j = 0;
-               for (i = 0; i < kms_pdata->ovl_cnt; i++) {
-                       struct omap_overlay *ovl =
-                               omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
-                       create_crtc(dev, ovl, &j, connected_connectors);
+               if (!connector) {
+                       dev_err(dev->dev, "could not create connector: %s\n",
+                                       dssdev->name);
+                       return -ENOMEM;
                }
 
-               for (i = 0; i < kms_pdata->pln_cnt; i++) {
-                       struct omap_overlay *ovl =
-                               omap_dss_get_overlay(kms_pdata->pln_ids[i]);
-                       create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
-               }
-       } else {
-               /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
-                * to make educated guesses about everything else
-                */
-               int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+               BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+               BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
 
-               for (i = 0; i < omap_dss_get_num_overlay_managers(); i++)
-                       create_encoder(dev, omap_dss_get_overlay_manager(i));
-
-               for_each_dss_dev(dssdev) {
-                       create_connector(dev, dssdev);
-               }
+               priv->encoders[priv->num_encoders++] = encoder;
+               priv->connectors[priv->num_connectors++] = connector;
 
-               connected_connectors = detect_connectors(dev);
+               drm_mode_connector_attach_encoder(connector, encoder);
 
-               j = 0;
-               for (i = 0; i < max_overlays; i++) {
-                       create_crtc(dev, omap_dss_get_overlay(i),
-                                       &j, connected_connectors);
-               }
-
-               /* use any remaining overlays as drm planes */
-               for (; i < omap_dss_get_num_overlays(); i++) {
-                       struct omap_overlay *ovl = omap_dss_get_overlay(i);
-                       create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
+               /* figure out which crtc's we can connect the encoder to: */
+               encoder->possible_crtcs = 0;
+               for (id = 0; id < priv->num_crtcs; id++) {
+                       enum omap_dss_output_id supported_outputs =
+                                       dss_feat_get_supported_outputs(pipe2chan(id));
+                       if (supported_outputs & dssdev->output->id)
+                               encoder->possible_crtcs |= (1 << id);
                }
        }
 
-       /* for now keep the mapping of CRTCs and encoders static.. */
-       for (i = 0; i < priv->num_encoders; i++) {
-               struct drm_encoder *encoder = priv->encoders[i];
-               struct omap_overlay_manager *mgr =
-                               omap_encoder_get_manager(encoder);
-
-               encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
-
-               DBG("%s: possible_crtcs=%08x", mgr->name,
-                                       encoder->possible_crtcs);
-       }
-
-       dump_video_chains();
-
        dev->mode_config.min_width = 32;
        dev->mode_config.min_height = 32;
 
@@ -450,7 +229,7 @@ static int ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
 {
        struct drm_omap_gem_new *args = data;
-       DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+       VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
                        args->size.bytes, args->flags);
        return omap_gem_new_handle(dev, file_priv, args->size,
                        args->flags, &args->handle);
@@ -510,7 +289,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        struct drm_gem_object *obj;
        int ret = 0;
 
-       DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+       VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj)
@@ -565,14 +344,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
 
        dev->dev_private = priv;
 
-       ret = omapdss_compat_init();
-       if (ret) {
-               dev_err(dev->dev, "coult not init omapdss\n");
-               dev->dev_private = NULL;
-               kfree(priv);
-               return ret;
-       }
-
        priv->wq = alloc_ordered_workqueue("omapdrm", 0);
 
        INIT_LIST_HEAD(&priv->obj_list);
@@ -584,10 +355,13 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
                dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
                dev->dev_private = NULL;
                kfree(priv);
-               omapdss_compat_uninit();
                return ret;
        }
 
+       ret = drm_vblank_init(dev, priv->num_crtcs);
+       if (ret)
+               dev_warn(dev->dev, "could not init vblank\n");
+
        priv->fbdev = omap_fbdev_init(dev);
        if (!priv->fbdev) {
                dev_warn(dev->dev, "omap_fbdev_init failed\n");
@@ -596,10 +370,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
 
        drm_kms_helper_poll_init(dev);
 
-       ret = drm_vblank_init(dev, priv->num_crtcs);
-       if (ret)
-               dev_warn(dev->dev, "could not init vblank\n");
-
        return 0;
 }
 
@@ -609,8 +379,9 @@ static int dev_unload(struct drm_device *dev)
 
        DBG("unload: dev=%p", dev);
 
-       drm_vblank_cleanup(dev);
        drm_kms_helper_poll_fini(dev);
+       drm_vblank_cleanup(dev);
+       omap_drm_irq_uninstall(dev);
 
        omap_fbdev_free(dev);
        omap_modeset_free(dev);
@@ -619,8 +390,6 @@ static int dev_unload(struct drm_device *dev)
        flush_workqueue(priv->wq);
        destroy_workqueue(priv->wq);
 
-       omapdss_compat_uninit();
-
        kfree(dev->dev_private);
        dev->dev_private = NULL;
 
@@ -680,7 +449,9 @@ static void dev_lastclose(struct drm_device *dev)
                }
        }
 
+       mutex_lock(&dev->mode_config.mutex);
        ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+       mutex_unlock(&dev->mode_config.mutex);
        if (ret)
                DBG("failed to restore crtc mode");
 }
@@ -695,60 +466,6 @@ static void dev_postclose(struct drm_device *dev, struct drm_file *file)
        DBG("postclose: dev=%p, file=%p", dev, file);
 }
 
-/**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Enable vblank interrupts for @crtc.  If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
-static int dev_enable_vblank(struct drm_device *dev, int crtc)
-{
-       DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
-       return 0;
-}
-
-/**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Disable vblank interrupts for @crtc.  If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- */
-static void dev_disable_vblank(struct drm_device *dev, int crtc)
-{
-       DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
-}
-
-static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
-{
-       return IRQ_HANDLED;
-}
-
-static void dev_irq_preinstall(struct drm_device *dev)
-{
-       DBG("irq_preinstall: dev=%p", dev);
-}
-
-static int dev_irq_postinstall(struct drm_device *dev)
-{
-       DBG("irq_postinstall: dev=%p", dev);
-       return 0;
-}
-
-static void dev_irq_uninstall(struct drm_device *dev)
-{
-       DBG("irq_uninstall: dev=%p", dev);
-}
-
 static const struct vm_operations_struct omap_gem_vm_ops = {
        .fault = omap_gem_fault,
        .open = drm_gem_vm_open,
@@ -778,12 +495,12 @@ static struct drm_driver omap_drm_driver = {
                .preclose = dev_preclose,
                .postclose = dev_postclose,
                .get_vblank_counter = drm_vblank_count,
-               .enable_vblank = dev_enable_vblank,
-               .disable_vblank = dev_disable_vblank,
-               .irq_preinstall = dev_irq_preinstall,
-               .irq_postinstall = dev_irq_postinstall,
-               .irq_uninstall = dev_irq_uninstall,
-               .irq_handler = dev_irq_handler,
+               .enable_vblank = omap_irq_enable_vblank,
+               .disable_vblank = omap_irq_disable_vblank,
+               .irq_preinstall = omap_irq_preinstall,
+               .irq_postinstall = omap_irq_postinstall,
+               .irq_uninstall = omap_irq_uninstall,
+               .irq_handler = omap_irq_handler,
 #ifdef CONFIG_DEBUG_FS
                .debugfs_init = omap_debugfs_init,
                .debugfs_cleanup = omap_debugfs_cleanup,
index 1d4aea5..cd1f22b 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_data/omap_drm.h>
 #include "omap_drm.h"
 
+
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
 
  */
 #define MAX_MAPPERS 2
 
+/* parameters which describe (unrotated) coordinates of scanout within a fb: */
+struct omap_drm_window {
+       uint32_t rotation;
+       int32_t  crtc_x, crtc_y;                /* signed because can be offscreen */
+       uint32_t crtc_w, crtc_h;
+       uint32_t src_x, src_y;
+       uint32_t src_w, src_h;
+};
+
+/* Once GO bit is set, we can't make further updates to shadowed registers
+ * until the GO bit is cleared.  So various parts in the kms code that need
+ * to update shadowed registers queue up a pair of callbacks, pre_apply
+ * which is called before setting GO bit, and post_apply that is called
+ * after GO bit is cleared.  The crtc manages the queuing, and everyone
+ * else goes thru omap_crtc_apply() using these callbacks so that the
+ * code which has to deal w/ GO bit state is centralized.
+ */
+struct omap_drm_apply {
+       struct list_head pending_node, queued_node;
+       bool queued;
+       void (*pre_apply)(struct omap_drm_apply *apply);
+       void (*post_apply)(struct omap_drm_apply *apply);
+};
+
+/* For transiently registering for different DSS irqs that various parts
+ * of the KMS code need during setup/configuration.  We these are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct omap_drm_irq {
+       struct list_head node;
+       uint32_t irqmask;
+       bool registered;
+       void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
+};
+
+/* For KMS code that needs to wait for a certain # of IRQs:
+ */
+struct omap_irq_wait;
+struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
+               uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+               unsigned long timeout);
+
 struct omap_drm_private {
        uint32_t omaprev;
 
@@ -58,6 +104,7 @@ struct omap_drm_private {
 
        struct workqueue_struct *wq;
 
+       /* list of GEM objects: */
        struct list_head obj_list;
 
        bool has_dmm;
@@ -65,6 +112,11 @@ struct omap_drm_private {
        /* properties: */
        struct drm_property *rotation_prop;
        struct drm_property *zorder_prop;
+
+       /* irq handling: */
+       struct list_head irq_list;    /* list of omap_drm_irq */
+       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+       struct omap_drm_irq error_handler;
 };
 
 /* this should probably be in drm-core to standardize amongst drivers */
@@ -75,15 +127,6 @@ struct omap_drm_private {
 #define DRM_REFLECT_X  4
 #define DRM_REFLECT_Y  5
 
-/* parameters which describe (unrotated) coordinates of scanout within a fb: */
-struct omap_drm_window {
-       uint32_t rotation;
-       int32_t  crtc_x, crtc_y;                /* signed because can be offscreen */
-       uint32_t crtc_w, crtc_h;
-       uint32_t src_x, src_y;
-       uint32_t src_w, src_h;
-};
-
 #ifdef CONFIG_DEBUG_FS
 int omap_debugfs_init(struct drm_minor *minor);
 void omap_debugfs_cleanup(struct drm_minor *minor);
@@ -92,23 +135,36 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
 #endif
 
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+void omap_irq_preinstall(struct drm_device *dev);
+int omap_irq_postinstall(struct drm_device *dev);
+void omap_irq_uninstall(struct drm_device *dev);
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
+int omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
 struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
 void omap_fbdev_free(struct drm_device *dev);
 
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+int omap_crtc_apply(struct drm_crtc *crtc,
+               struct omap_drm_apply *apply);
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
-               struct omap_overlay *ovl, int id);
+               struct drm_plane *plane, enum omap_channel channel, int id);
 
 struct drm_plane *omap_plane_init(struct drm_device *dev,
-               struct omap_overlay *ovl, unsigned int possible_crtcs,
-               bool priv);
+               int plane_id, bool private_plane);
 int omap_plane_dpms(struct drm_plane *plane, int mode);
 int omap_plane_mode_set(struct drm_plane *plane,
                struct drm_crtc *crtc, struct drm_framebuffer *fb,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
                uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h);
-void omap_plane_on_endwin(struct drm_plane *plane,
+               uint32_t src_w, uint32_t src_h,
                void (*fxn)(void *), void *arg);
 void omap_plane_install_properties(struct drm_plane *plane,
                struct drm_mode_object *obj);
@@ -116,21 +172,25 @@ int omap_plane_set_property(struct drm_plane *plane,
                struct drm_property *property, uint64_t val);
 
 struct drm_encoder *omap_encoder_init(struct drm_device *dev,
-               struct omap_overlay_manager *mgr);
-struct omap_overlay_manager *omap_encoder_get_manager(
+               struct omap_dss_device *dssdev);
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
+int omap_encoder_update(struct drm_encoder *encoder,
+               struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+               int connector_type, struct omap_dss_device *dssdev,
                struct drm_encoder *encoder);
 struct drm_encoder *omap_connector_attached_encoder(
                struct drm_connector *connector);
-enum drm_connector_status omap_connector_detect(
-               struct drm_connector *connector, bool force);
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
-               int connector_type, struct omap_dss_device *dssdev);
-void omap_connector_mode_set(struct drm_connector *connector,
-               struct drm_display_mode *mode);
 void omap_connector_flush(struct drm_connector *connector,
                int x, int y, int w, int h);
 
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+               struct omap_video_timings *timings);
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+               struct drm_display_mode *mode);
+
 uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
                uint32_t max_formats, enum omap_color_mode supported_modes);
 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
@@ -207,6 +267,40 @@ static inline int align_pitch(int pitch, int width, int bpp)
        return ALIGN(pitch, 8 * bytespp);
 }
 
+static inline enum omap_channel pipe2chan(int pipe)
+{
+       int num_mgrs = dss_feat_get_num_mgrs();
+
+       /*
+        * We usually don't want to create a CRTC for each manager,
+        * at least not until we have a way to expose private planes
+        * to userspace.  Otherwise there would not be enough video
+        * pipes left for drm planes.  The higher #'d managers tend
+        * to have more features so start in reverse order.
+        */
+       return num_mgrs - pipe - 1;
+}
+
+/* map crtc to vblank mask */
+static inline uint32_t pipe2vbl(int crtc)
+{
+       enum omap_channel channel = pipe2chan(crtc);
+       return dispc_mgr_get_vsync_irq(channel);
+}
+
+static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
+               if (priv->crtcs[i] == crtc)
+                       return i;
+
+       BUG();  /* bogus CRTC ptr */
+       return -1;
+}
+
 /* should these be made into common util helpers?
  */
 
index 5341d5e..e053160 100644 (file)
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
+#include <linux/list.h>
+
+
 /*
  * encoder funcs
  */
 
 #define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
 
+/* The encoder and connector both map to same dssdev.. the encoder
+ * handles the 'active' parts, ie. anything the modifies the state
+ * of the hw, and the connector handles the 'read-only' parts, like
+ * detecting connection and reading edid.
+ */
 struct omap_encoder {
        struct drm_encoder base;
-       struct omap_overlay_manager *mgr;
+       struct omap_dss_device *dssdev;
 };
 
 static void omap_encoder_destroy(struct drm_encoder *encoder)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       DBG("%s", omap_encoder->mgr->name);
        drm_encoder_cleanup(encoder);
        kfree(omap_encoder);
 }
 
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+       .destroy = omap_encoder_destroy,
+};
+
+/*
+ * The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
+ * order.. the easiest way to work around this for now is to make all
+ * the encoder-helper's no-op's and have the omap_crtc code take care
+ * of the sequencing and call us in the right points.
+ *
+ * Eventually to handle connecting CRTCs to different encoders properly,
+ * either the CRTC helpers need to change or we need to replace
+ * drm_crtc_helper_set_mode(), but lets wait until atomic-modeset for
+ * that.
+ */
+
 static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       DBG("%s: %d", omap_encoder->mgr->name, mode);
 }
 
 static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
                                  const struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       DBG("%s", omap_encoder->mgr->name);
        return true;
 }
 
@@ -60,47 +79,16 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
                                struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct omap_drm_private *priv = dev->dev_private;
-       int i;
-
-       mode = adjusted_mode;
-
-       DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
-                       mode->hdisplay, mode->vdisplay);
-
-       for (i = 0; i < priv->num_connectors; i++) {
-               struct drm_connector *connector = priv->connectors[i];
-               if (connector->encoder == encoder)
-                       omap_connector_mode_set(connector, mode);
-
-       }
 }
 
 static void omap_encoder_prepare(struct drm_encoder *encoder)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_encoder_helper_funcs *encoder_funcs =
-                               encoder->helper_private;
-       DBG("%s", omap_encoder->mgr->name);
-       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
 }
 
 static void omap_encoder_commit(struct drm_encoder *encoder)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_encoder_helper_funcs *encoder_funcs =
-                               encoder->helper_private;
-       DBG("%s", omap_encoder->mgr->name);
-       omap_encoder->mgr->apply(omap_encoder->mgr);
-       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
-static const struct drm_encoder_funcs omap_encoder_funcs = {
-       .destroy = omap_encoder_destroy,
-};
-
 static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
        .dpms = omap_encoder_dpms,
        .mode_fixup = omap_encoder_mode_fixup,
@@ -109,23 +97,54 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
        .commit = omap_encoder_commit,
 };
 
-struct omap_overlay_manager *omap_encoder_get_manager(
-               struct drm_encoder *encoder)
+/*
+ * Instead of relying on the helpers for modeset, the omap_crtc code
+ * calls these functions in the proper sequence.
+ */
+
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       return omap_encoder->mgr;
+       struct omap_dss_device *dssdev = omap_encoder->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+
+       if (enabled) {
+               return dssdrv->enable(dssdev);
+       } else {
+               dssdrv->disable(dssdev);
+               return 0;
+       }
+}
+
+int omap_encoder_update(struct drm_encoder *encoder,
+               struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+{
+       struct drm_device *dev = encoder->dev;
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct omap_dss_device *dssdev = omap_encoder->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       int ret;
+
+       dssdev->output->manager = mgr;
+
+       ret = dssdrv->check_timings(dssdev, timings);
+       if (ret) {
+               dev_err(dev->dev, "could not set timings: %d\n", ret);
+               return ret;
+       }
+
+       dssdrv->set_timings(dssdev, timings);
+
+       return 0;
 }
 
 /* initialize encoder */
 struct drm_encoder *omap_encoder_init(struct drm_device *dev,
-               struct omap_overlay_manager *mgr)
+               struct omap_dss_device *dssdev)
 {
        struct drm_encoder *encoder = NULL;
        struct omap_encoder *omap_encoder;
-       struct omap_overlay_manager_info info;
-       int ret;
-
-       DBG("%s", mgr->name);
 
        omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
        if (!omap_encoder) {
@@ -133,33 +152,14 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
                goto fail;
        }
 
-       omap_encoder->mgr = mgr;
+       omap_encoder->dssdev = dssdev;
+
        encoder = &omap_encoder->base;
 
        drm_encoder_init(dev, encoder, &omap_encoder_funcs,
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
 
-       mgr->get_manager_info(mgr, &info);
-
-       /* TODO: fix hard-coded setup.. */
-       info.default_color = 0x00000000;
-       info.trans_key = 0x00000000;
-       info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
-       info.trans_enabled = false;
-
-       ret = mgr->set_manager_info(mgr, &info);
-       if (ret) {
-               dev_err(dev->dev, "could not set manager info\n");
-               goto fail;
-       }
-
-       ret = mgr->apply(mgr);
-       if (ret) {
-               dev_err(dev->dev, "could not apply\n");
-               goto fail;
-       }
-
        return encoder;
 
 fail:
index ea38400..b6c5b5c 100644 (file)
@@ -194,7 +194,7 @@ struct dma_buf_ops omap_dmabuf_ops = {
 struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
                struct drm_gem_object *obj, int flags)
 {
-       return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
+       return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
 }
 
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/staging/omapdrm/omap_irq.c
new file mode 100644 (file)
index 0000000..2629ba7
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * drivers/staging/omapdrm/omap_irq.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void omap_irq_error_handler(struct omap_drm_irq *irq,
+               uint32_t irqstatus)
+{
+       DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* call with list_lock and dispc runtime held */
+static void omap_irq_update(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_drm_irq *irq;
+       uint32_t irqmask = priv->vblank_mask;
+
+       BUG_ON(!spin_is_locked(&list_lock));
+
+       list_for_each_entry(irq, &priv->irq_list, node)
+               irqmask |= irq->irqmask;
+
+       DBG("irqmask=%08x", irqmask);
+
+       dispc_write_irqenable(irqmask);
+       dispc_read_irqenable();        /* flush posted write */
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+
+       if (!WARN_ON(irq->registered)) {
+               irq->registered = true;
+               list_add(&irq->node, &priv->irq_list);
+               omap_irq_update(dev);
+       }
+
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+       unsigned long flags;
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+
+       if (!WARN_ON(!irq->registered)) {
+               irq->registered = false;
+               list_del(&irq->node);
+               omap_irq_update(dev);
+       }
+
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+}
+
+struct omap_irq_wait {
+       struct omap_drm_irq irq;
+       int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_irq_wait *wait =
+                       container_of(irq, struct omap_irq_wait, irq);
+       wait->count--;
+       wake_up_all(&wait_event);
+}
+
+struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
+               uint32_t irqmask, int count)
+{
+       struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+       wait->irq.irq = wait_irq;
+       wait->irq.irqmask = irqmask;
+       wait->count = count;
+       omap_irq_register(dev, &wait->irq);
+       return wait;
+}
+
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+               unsigned long timeout)
+{
+       int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
+       omap_irq_unregister(dev, &wait->irq);
+       kfree(wait);
+       if (ret == 0)
+               return -1;
+       return 0;
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       DBG("dev=%p, crtc=%d", dev, crtc);
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+       priv->vblank_mask |= pipe2vbl(crtc);
+       omap_irq_update(dev);
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+
+       return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Disable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       DBG("dev=%p, crtc=%d", dev, crtc);
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+       priv->vblank_mask &= ~pipe2vbl(crtc);
+       omap_irq_update(dev);
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+}
+
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_drm_irq *handler, *n;
+       unsigned long flags;
+       unsigned int id;
+       u32 irqstatus;
+
+       irqstatus = dispc_read_irqstatus();
+       dispc_clear_irqstatus(irqstatus);
+       dispc_read_irqstatus();        /* flush posted write */
+
+       VERB("irqs: %08x", irqstatus);
+
+       for (id = 0; id < priv->num_crtcs; id++)
+               if (irqstatus & pipe2vbl(id))
+                       drm_handle_vblank(dev, id);
+
+       spin_lock_irqsave(&list_lock, flags);
+       list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
+               if (handler->irqmask & irqstatus) {
+                       spin_unlock_irqrestore(&list_lock, flags);
+                       handler->irq(handler, handler->irqmask & irqstatus);
+                       spin_lock_irqsave(&list_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&list_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+void omap_irq_preinstall(struct drm_device *dev)
+{
+       DBG("dev=%p", dev);
+       dispc_runtime_get();
+       dispc_clear_irqstatus(0xffffffff);
+       dispc_runtime_put();
+}
+
+int omap_irq_postinstall(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_drm_irq *error_handler = &priv->error_handler;
+
+       DBG("dev=%p", dev);
+
+       INIT_LIST_HEAD(&priv->irq_list);
+
+       error_handler->irq = omap_irq_error_handler;
+       error_handler->irqmask = DISPC_IRQ_OCP_ERR;
+
+       /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
+        * we just need to ignore it while enabling tv-out
+        */
+       error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+
+       omap_irq_register(dev, error_handler);
+
+       return 0;
+}
+
+void omap_irq_uninstall(struct drm_device *dev)
+{
+       DBG("dev=%p", dev);
+       // TODO prolly need to call drm_irq_uninstall() somewhere too
+}
+
+/*
+ * We need a special version, instead of just using drm_irq_install(),
+ * because we need to register the irq via omapdss.  Once omapdss and
+ * omapdrm are merged together we can assign the dispc hwmod data to
+ * ourselves and drop these and just use drm_irq_{install,uninstall}()
+ */
+
+int omap_drm_irq_install(struct drm_device *dev)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (dev->irq_enabled) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EBUSY;
+       }
+       dev->irq_enabled = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       /* Before installing handler */
+       if (dev->driver->irq_preinstall)
+               dev->driver->irq_preinstall(dev);
+
+       ret = dispc_request_irq(dev->driver->irq_handler, dev);
+
+       if (ret < 0) {
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       /* After installing handler */
+       if (dev->driver->irq_postinstall)
+               ret = dev->driver->irq_postinstall(dev);
+
+       if (ret < 0) {
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               dispc_free_irq(dev);
+       }
+
+       return ret;
+}
+
+int omap_drm_irq_uninstall(struct drm_device *dev)
+{
+       unsigned long irqflags;
+       int irq_enabled, i;
+
+       mutex_lock(&dev->struct_mutex);
+       irq_enabled = dev->irq_enabled;
+       dev->irq_enabled = 0;
+       mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * Wake up any waiters so they don't hang.
+        */
+       if (dev->num_crtcs) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               for (i = 0; i < dev->num_crtcs; i++) {
+                       DRM_WAKEUP(&dev->vbl_queue[i]);
+                       dev->vblank_enabled[i] = 0;
+                       dev->last_vblank[i] =
+                               dev->driver->get_vblank_counter(dev, i);
+               }
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+       }
+
+       if (!irq_enabled)
+               return -EINVAL;
+
+       if (dev->driver->irq_uninstall)
+               dev->driver->irq_uninstall(dev);
+
+       dispc_free_irq(dev);
+
+       return 0;
+}
index 2a8e5ba..bb989d7 100644 (file)
@@ -41,12 +41,14 @@ struct callback {
 
 struct omap_plane {
        struct drm_plane base;
-       struct omap_overlay *ovl;
+       int id;  /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+       const char *name;
        struct omap_overlay_info info;
+       struct omap_drm_apply apply;
 
        /* position/orientation of scanout within the fb: */
        struct omap_drm_window win;
-
+       bool enabled;
 
        /* last fb that we pinned: */
        struct drm_framebuffer *pinned_fb;
@@ -54,189 +56,15 @@ struct omap_plane {
        uint32_t nformats;
        uint32_t formats[32];
 
-       /* for synchronizing access to unpins fifo */
-       struct mutex unpin_mutex;
+       struct omap_drm_irq error_irq;
 
-       /* set of bo's pending unpin until next END_WIN irq */
+       /* set of bo's pending unpin until next post_apply() */
        DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
-       int num_unpins, pending_num_unpins;
-
-       /* for deferred unpin when we need to wait for scanout complete irq */
-       struct work_struct work;
-
-       /* callback on next endwin irq */
-       struct callback endwin;
-};
 
-/* map from ovl->id to the irq we are interested in for scanout-done */
-static const uint32_t id2irq[] = {
-               [OMAP_DSS_GFX]    = DISPC_IRQ_GFX_END_WIN,
-               [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
-               [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
-               [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
+       // XXX maybe get rid of this and handle vblank in crtc too?
+       struct callback apply_done_cb;
 };
 
-static void dispc_isr(void *arg, uint32_t mask)
-{
-       struct drm_plane *plane = arg;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_drm_private *priv = plane->dev->dev_private;
-
-       omap_dispc_unregister_isr(dispc_isr, plane,
-                       id2irq[omap_plane->ovl->id]);
-
-       queue_work(priv->wq, &omap_plane->work);
-}
-
-static void unpin_worker(struct work_struct *work)
-{
-       struct omap_plane *omap_plane =
-                       container_of(work, struct omap_plane, work);
-       struct callback endwin;
-
-       mutex_lock(&omap_plane->unpin_mutex);
-       DBG("unpinning %d of %d", omap_plane->num_unpins,
-                       omap_plane->num_unpins + omap_plane->pending_num_unpins);
-       while (omap_plane->num_unpins > 0) {
-               struct drm_gem_object *bo = NULL;
-               int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
-               WARN_ON(!ret);
-               omap_gem_put_paddr(bo);
-               drm_gem_object_unreference_unlocked(bo);
-               omap_plane->num_unpins--;
-       }
-       endwin = omap_plane->endwin;
-       omap_plane->endwin.fxn = NULL;
-       mutex_unlock(&omap_plane->unpin_mutex);
-
-       if (endwin.fxn)
-               endwin.fxn(endwin.arg);
-}
-
-static void install_irq(struct drm_plane *plane)
-{
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       int ret;
-
-       ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
-
-       /*
-        * omapdss has upper limit on # of registered irq handlers,
-        * which we shouldn't hit.. but if we do the limit should
-        * be raised or bad things happen:
-        */
-       WARN_ON(ret == -EBUSY);
-}
-
-/* push changes down to dss2 */
-static int commit(struct drm_plane *plane)
-{
-       struct drm_device *dev = plane->dev;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       struct omap_overlay_info *info = &omap_plane->info;
-       int ret;
-
-       DBG("%s", ovl->name);
-       DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
-                       info->out_height, info->screen_width);
-       DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
-                       info->paddr, info->p_uv_addr);
-
-       /* NOTE: do we want to do this at all here, or just wait
-        * for dpms(ON) since other CRTC's may not have their mode
-        * set yet, so fb dimensions may still change..
-        */
-       ret = ovl->set_overlay_info(ovl, info);
-       if (ret) {
-               dev_err(dev->dev, "could not set overlay info\n");
-               return ret;
-       }
-
-       mutex_lock(&omap_plane->unpin_mutex);
-       omap_plane->num_unpins += omap_plane->pending_num_unpins;
-       omap_plane->pending_num_unpins = 0;
-       mutex_unlock(&omap_plane->unpin_mutex);
-
-       /* our encoder doesn't necessarily get a commit() after this, in
-        * particular in the dpms() and mode_set_base() cases, so force the
-        * manager to update:
-        *
-        * could this be in the encoder somehow?
-        */
-       if (ovl->manager) {
-               ret = ovl->manager->apply(ovl->manager);
-               if (ret) {
-                       dev_err(dev->dev, "could not apply settings\n");
-                       return ret;
-               }
-
-               /*
-                * NOTE: really this should be atomic w/ mgr->apply() but
-                * omapdss does not expose such an API
-                */
-               if (omap_plane->num_unpins > 0)
-                       install_irq(plane);
-
-       } else {
-               struct omap_drm_private *priv = dev->dev_private;
-               queue_work(priv->wq, &omap_plane->work);
-       }
-
-
-       if (ovl->is_enabled(ovl)) {
-               omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
-                               info->out_width, info->out_height);
-       }
-
-       return 0;
-}
-
-/* when CRTC that we are attached to has potentially changed, this checks
- * if we are attached to proper manager, and if necessary updates.
- */
-static void update_manager(struct drm_plane *plane)
-{
-       struct omap_drm_private *priv = plane->dev->dev_private;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       struct omap_overlay_manager *mgr = NULL;
-       int i;
-
-       if (plane->crtc) {
-               for (i = 0; i < priv->num_encoders; i++) {
-                       struct drm_encoder *encoder = priv->encoders[i];
-                       if (encoder->crtc == plane->crtc) {
-                               mgr = omap_encoder_get_manager(encoder);
-                               break;
-                       }
-               }
-       }
-
-       if (ovl->manager != mgr) {
-               bool enabled = ovl->is_enabled(ovl);
-
-               /* don't switch things around with enabled overlays: */
-               if (enabled)
-                       omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
-
-               if (ovl->manager) {
-                       DBG("disconnecting %s from %s", ovl->name,
-                                       ovl->manager->name);
-                       ovl->unset_manager(ovl);
-               }
-
-               if (mgr) {
-                       DBG("connecting %s to %s", ovl->name, mgr->name);
-                       ovl->set_manager(ovl, mgr);
-               }
-
-               if (enabled && mgr)
-                       omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
-       }
-}
-
 static void unpin(void *arg, struct drm_gem_object *bo)
 {
        struct drm_plane *plane = arg;
@@ -244,7 +72,6 @@ static void unpin(void *arg, struct drm_gem_object *bo)
 
        if (kfifo_put(&omap_plane->unpin_fifo,
                        (const struct drm_gem_object **)&bo)) {
-               omap_plane->pending_num_unpins++;
                /* also hold a ref so it isn't free'd while pinned */
                drm_gem_object_reference(bo);
        } else {
@@ -264,13 +91,19 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
 
                DBG("%p -> %p", pinned_fb, fb);
 
-               mutex_lock(&omap_plane->unpin_mutex);
+               if (fb)
+                       drm_framebuffer_reference(fb);
+
                ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
-               mutex_unlock(&omap_plane->unpin_mutex);
+
+               if (pinned_fb)
+                       drm_framebuffer_unreference(pinned_fb);
 
                if (ret) {
                        dev_err(plane->dev->dev, "could not swap %p -> %p\n",
                                        omap_plane->pinned_fb, fb);
+                       if (fb)
+                               drm_framebuffer_unreference(fb);
                        omap_plane->pinned_fb = NULL;
                        return ret;
                }
@@ -281,31 +114,90 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
        return 0;
 }
 
-/* update parameters that are dependent on the framebuffer dimensions and
- * position within the fb that this plane scans out from. This is called
- * when framebuffer or x,y base may have changed.
- */
-static void update_scanout(struct drm_plane *plane)
+static void omap_plane_pre_apply(struct omap_drm_apply *apply)
 {
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay_info *info = &omap_plane->info;
+       struct omap_plane *omap_plane =
+                       container_of(apply, struct omap_plane, apply);
        struct omap_drm_window *win = &omap_plane->win;
+       struct drm_plane *plane = &omap_plane->base;
+       struct drm_device *dev = plane->dev;
+       struct omap_overlay_info *info = &omap_plane->info;
+       struct drm_crtc *crtc = plane->crtc;
+       enum omap_channel channel;
+       bool enabled = omap_plane->enabled && crtc;
+       bool ilace, replication;
        int ret;
 
-       ret = update_pin(plane, plane->fb);
-       if (ret) {
-               dev_err(plane->dev->dev,
-                       "could not pin fb: %d\n", ret);
-               omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+       DBG("%s, enabled=%d", omap_plane->name, enabled);
+
+       /* if fb has changed, pin new fb: */
+       update_pin(plane, enabled ? plane->fb : NULL);
+
+       if (!enabled) {
+               dispc_ovl_enable(omap_plane->id, false);
                return;
        }
 
+       channel = omap_crtc_channel(crtc);
+
+       /* update scanout: */
        omap_framebuffer_update_scanout(plane->fb, win, info);
 
-       DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
-                       win->src_x, win->src_y,
-                       (u32)info->paddr, (u32)info->p_uv_addr,
+       DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+                       info->out_width, info->out_height,
                        info->screen_width);
+       DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+                       info->paddr, info->p_uv_addr);
+
+       /* TODO: */
+       ilace = false;
+       replication = false;
+
+       /* and finally, update omapdss: */
+       ret = dispc_ovl_setup(omap_plane->id, info,
+                       replication, omap_crtc_timings(crtc), false);
+       if (ret) {
+               dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+               return;
+       }
+
+       dispc_ovl_enable(omap_plane->id, true);
+       dispc_ovl_set_channel_out(omap_plane->id, channel);
+}
+
+static void omap_plane_post_apply(struct omap_drm_apply *apply)
+{
+       struct omap_plane *omap_plane =
+                       container_of(apply, struct omap_plane, apply);
+       struct drm_plane *plane = &omap_plane->base;
+       struct omap_overlay_info *info = &omap_plane->info;
+       struct drm_gem_object *bo = NULL;
+       struct callback cb;
+
+       cb = omap_plane->apply_done_cb;
+       omap_plane->apply_done_cb.fxn = NULL;
+
+       while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+               omap_gem_put_paddr(bo);
+               drm_gem_object_unreference_unlocked(bo);
+       }
+
+       if (cb.fxn)
+               cb.fxn(cb.arg);
+
+       if (omap_plane->enabled) {
+               omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+                               info->out_width, info->out_height);
+       }
+}
+
+static int apply(struct drm_plane *plane)
+{
+       if (plane->crtc) {
+               struct omap_plane *omap_plane = to_omap_plane(plane);
+               return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+       }
+       return 0;
 }
 
 int omap_plane_mode_set(struct drm_plane *plane,
@@ -313,7 +205,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
                uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h)
+               uint32_t src_w, uint32_t src_h,
+               void (*fxn)(void *), void *arg)
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
        struct omap_drm_window *win = &omap_plane->win;
@@ -329,17 +222,20 @@ int omap_plane_mode_set(struct drm_plane *plane,
        win->src_w = src_w >> 16;
        win->src_h = src_h >> 16;
 
-       /* note: this is done after this fxn returns.. but if we need
-        * to do a commit/update_scanout, etc before this returns we
-        * need the current value.
-        */
+       if (fxn) {
+               /* omap_crtc should ensure that a new page flip
+                * isn't permitted while there is one pending:
+                */
+               BUG_ON(omap_plane->apply_done_cb.fxn);
+
+               omap_plane->apply_done_cb.fxn = fxn;
+               omap_plane->apply_done_cb.arg = arg;
+       }
+
        plane->fb = fb;
        plane->crtc = crtc;
 
-       update_scanout(plane);
-       update_manager(plane);
-
-       return 0;
+       return apply(plane);
 }
 
 static int omap_plane_update(struct drm_plane *plane,
@@ -349,9 +245,12 @@ static int omap_plane_update(struct drm_plane *plane,
                uint32_t src_x, uint32_t src_y,
                uint32_t src_w, uint32_t src_h)
 {
-       omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x, src_y, src_w, src_h);
-       return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       omap_plane->enabled = true;
+       return omap_plane_mode_set(plane, crtc, fb,
+                       crtc_x, crtc_y, crtc_w, crtc_h,
+                       src_x, src_y, src_w, src_h,
+                       NULL, NULL);
 }
 
 static int omap_plane_disable(struct drm_plane *plane)
@@ -364,48 +263,32 @@ static int omap_plane_disable(struct drm_plane *plane)
 static void omap_plane_destroy(struct drm_plane *plane)
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
-       DBG("%s", omap_plane->ovl->name);
+
+       DBG("%s", omap_plane->name);
+
+       omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+
        omap_plane_disable(plane);
        drm_plane_cleanup(plane);
-       WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
+
+       WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
        kfifo_free(&omap_plane->unpin_fifo);
+
        kfree(omap_plane);
 }
 
 int omap_plane_dpms(struct drm_plane *plane, int mode)
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       int r;
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+       int ret = 0;
 
-       DBG("%s: %d", omap_plane->ovl->name, mode);
-
-       if (mode == DRM_MODE_DPMS_ON) {
-               update_scanout(plane);
-               r = commit(plane);
-               if (!r)
-                       r = ovl->enable(ovl);
-       } else {
-               struct omap_drm_private *priv = plane->dev->dev_private;
-               r = ovl->disable(ovl);
-               update_pin(plane, NULL);
-               queue_work(priv->wq, &omap_plane->work);
+       if (enabled != omap_plane->enabled) {
+               omap_plane->enabled = enabled;
+               ret = apply(plane);
        }
 
-       return r;
-}
-
-void omap_plane_on_endwin(struct drm_plane *plane,
-               void (*fxn)(void *), void *arg)
-{
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-
-       mutex_lock(&omap_plane->unpin_mutex);
-       omap_plane->endwin.fxn = fxn;
-       omap_plane->endwin.arg = arg;
-       mutex_unlock(&omap_plane->unpin_mutex);
-
-       install_irq(plane);
+       return ret;
 }
 
 /* helper to install properties which are common to planes and crtcs */
@@ -454,25 +337,13 @@ int omap_plane_set_property(struct drm_plane *plane,
        int ret = -EINVAL;
 
        if (property == priv->rotation_prop) {
-               struct omap_overlay *ovl = omap_plane->ovl;
-
-               DBG("%s: rotation: %02x", ovl->name, (uint32_t)val);
+               DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
                omap_plane->win.rotation = val;
-
-               if (ovl->is_enabled(ovl))
-                       ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
-               else
-                       ret = 0;
+               ret = apply(plane);
        } else if (property == priv->zorder_prop) {
-               struct omap_overlay *ovl = omap_plane->ovl;
-
-               DBG("%s: zorder: %d", ovl->name, (uint32_t)val);
+               DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
                omap_plane->info.zorder = val;
-
-               if (ovl->is_enabled(ovl))
-                       ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
-               else
-                       ret = 0;
+               ret = apply(plane);
        }
 
        return ret;
@@ -485,20 +356,38 @@ static const struct drm_plane_funcs omap_plane_funcs = {
                .set_property = omap_plane_set_property,
 };
 
+static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_plane *omap_plane =
+                       container_of(irq, struct omap_plane, error_irq);
+       DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+}
+
+static const char *plane_names[] = {
+               [OMAP_DSS_GFX] = "gfx",
+               [OMAP_DSS_VIDEO1] = "vid1",
+               [OMAP_DSS_VIDEO2] = "vid2",
+               [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static const uint32_t error_irqs[] = {
+               [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
 /* initialize plane */
 struct drm_plane *omap_plane_init(struct drm_device *dev,
-               struct omap_overlay *ovl, unsigned int possible_crtcs,
-               bool priv)
+               int id, bool private_plane)
 {
+       struct omap_drm_private *priv = dev->dev_private;
        struct drm_plane *plane = NULL;
        struct omap_plane *omap_plane;
+       struct omap_overlay_info *info;
        int ret;
 
-       DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
-                       possible_crtcs, priv);
-
-       /* friendly reminder to update table for future hw: */
-       WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
+       DBG("%s: priv=%d", plane_names[id], private_plane);
 
        omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
        if (!omap_plane) {
@@ -506,47 +395,50 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
                goto fail;
        }
 
-       mutex_init(&omap_plane->unpin_mutex);
-
        ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
        if (ret) {
                dev_err(dev->dev, "could not allocate unpin FIFO\n");
                goto fail;
        }
 
-       INIT_WORK(&omap_plane->work, unpin_worker);
-
        omap_plane->nformats = omap_framebuffer_get_formats(
                        omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
-                       ovl->supported_modes);
-       omap_plane->ovl = ovl;
+                       dss_feat_get_supported_color_modes(id));
+       omap_plane->id = id;
+       omap_plane->name = plane_names[id];
+
        plane = &omap_plane->base;
 
-       drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
-                       omap_plane->formats, omap_plane->nformats, priv);
+       omap_plane->apply.pre_apply  = omap_plane_pre_apply;
+       omap_plane->apply.post_apply = omap_plane_post_apply;
+
+       omap_plane->error_irq.irqmask = error_irqs[id];
+       omap_plane->error_irq.irq = omap_plane_error_irq;
+       omap_irq_register(dev, &omap_plane->error_irq);
+
+       drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+                       omap_plane->formats, omap_plane->nformats, private_plane);
 
        omap_plane_install_properties(plane, &plane->base);
 
        /* get our starting configuration, set defaults for parameters
         * we don't currently use, etc:
         */
-       ovl->get_overlay_info(ovl, &omap_plane->info);
-       omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
-       omap_plane->info.rotation = OMAP_DSS_ROT_0;
-       omap_plane->info.global_alpha = 0xff;
-       omap_plane->info.mirror = 0;
+       info = &omap_plane->info;
+       info->rotation_type = OMAP_DSS_ROT_DMA;
+       info->rotation = OMAP_DSS_ROT_0;
+       info->global_alpha = 0xff;
+       info->mirror = 0;
 
        /* Set defaults depending on whether we are a CRTC or overlay
         * layer.
         * TODO add ioctl to give userspace an API to change this.. this
         * will come in a subsequent patch.
         */
-       if (priv)
+       if (private_plane)
                omap_plane->info.zorder = 0;
        else
-               omap_plane->info.zorder = ovl->id;
-
-       update_manager(plane);
+               omap_plane->info.zorder = id;
 
        return plane;
 
index ae38475..d10d75e 100644 (file)
@@ -937,7 +937,8 @@ short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
 
                dma_tmp = pci_map_single(pdev, buf, bufsize * sizeof(u8),
                                         PCI_DMA_FROMDEVICE);
-
+               if (pci_dma_mapping_error(pdev, dma_tmp))
+                       return -1;
                if (-1 == buffer_add(&(priv->rxbuffer), buf, dma_tmp,
                           &(priv->rxbufferhead))) {
                        DMESGE("Unable to allocate mem RX buf");
index 808aab6..a9d78e9 100644 (file)
@@ -1183,6 +1183,8 @@ void  rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
                                                pTxFwInfo->TxRate,
                                                cb_desc);
 
+       if (pci_dma_mapping_error(priv->pdev, mapping))
+               RT_TRACE(COMP_ERR, "DMA Mapping error\n");;
        if (cb_desc->bAMPDUEnable) {
                pTxFwInfo->AllowAggregation = 1;
                pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1280,6 +1282,8 @@ void  rtl8192_tx_fill_cmd_desc(struct net_device *dev,
        dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
                         PCI_DMA_TODEVICE);
 
+       if (pci_dma_mapping_error(priv->pdev, mapping))
+               RT_TRACE(COMP_ERR, "DMA Mapping error\n");;
        memset(entry, 0, 12);
        entry->LINIP = cb_desc->bLastIniPkt;
        entry->FirstSeg = 1;
index 1a70f32..4ebf99b 100644 (file)
@@ -2104,7 +2104,10 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
                                                  skb_tail_pointer_rsl(skb),
                                                  priv->rxbuffersize,
                                                  PCI_DMA_FROMDEVICE);
-
+                       if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+                               dev_kfree_skb_any(skb);
+                               return -1;
+                       }
                        entry->BufferAddress = cpu_to_le32(*mapping);
 
                        entry->Length = priv->rxbuffersize;
@@ -2397,7 +2400,11 @@ static void rtl8192_rx_normal(struct net_device *dev)
                                                    skb_tail_pointer_rsl(skb),
                                                    priv->rxbuffersize,
                                                    PCI_DMA_FROMDEVICE);
-
+                       if (pci_dma_mapping_error(priv->pdev,
+                                                 *((dma_addr_t *)skb->cb))) {
+                               dev_kfree_skb_any(skb);
+                               return;
+                       }
                }
 done:
                pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
index 6b73843..a96cd06 100644 (file)
@@ -63,6 +63,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
        /* Belkin */
        {USB_DEVICE(0x050D, 0x945A)},
+       /* ISY IWL - Belkin clone */
+       {USB_DEVICE(0x050D, 0x11F1)},
        /* Corega */
        {USB_DEVICE(0x07AA, 0x0047)},
        /* D-Link */
index ac87c5e..1facad6 100644 (file)
@@ -2,6 +2,7 @@ config SB105X
        tristate "SystemBase PCI Multiport UART"
        select SERIAL_CORE
        depends on PCI
+       depends on X86
        help
          A driver for the SystemBase Multi-2/PCI serial card
 
index edb2a85..9464f38 100644 (file)
@@ -3054,6 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
                                sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
                        }
                        break;
+#ifdef CONFIG_PARPORT_PC
                case PCI_DEVICE_ID_MP2S1P :
                        sbdev->nr_ports = 2;
 
@@ -3073,6 +3074,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
                        /* add PC compatible parallel port */
                        parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
                        break;
+#endif
        }
 
        ret = request_region(sbdev->uart_access_addr, (8*sbdev->nr_ports), sbdev->name);
index df95337..7616f05 100644 (file)
@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
 
        mutex_lock(&spk_mutex);
        /* First, check if we already have it loaded. */
-       for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+       for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
                if (strcmp(synths[i]->name, synth_name) == 0)
                        synth = synths[i];
 
@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
        int i;
        int status = 0;
        mutex_lock(&spk_mutex);
-       for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+       for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
                /* synth_remove() is responsible for rotating the array down */
                if (in_synth == synths[i]) {
                        mutex_unlock(&spk_mutex);
index 543a127..b783bfa 100644 (file)
@@ -31,7 +31,7 @@
  * driver should read or write to PRM/CM registers directly; they
  * should rely on OMAP core code to do this.
  */
-#include <mach-omap2/cm2xxx_3xxx.h>
+#include <mach-omap2/cm3xxx.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
 #include <dspbridge/devdefs.h>
index b647207..2f084e1 100644 (file)
@@ -121,9 +121,13 @@ void dsp_clk_exit(void)
        for (i = 0; i < DM_TIMER_CLOCKS; i++)
                omap_dm_timer_free(timer[i]);
 
+       clk_unprepare(iva2_clk);
        clk_put(iva2_clk);
+       clk_unprepare(ssi.sst_fck);
        clk_put(ssi.sst_fck);
+       clk_unprepare(ssi.ssr_fck);
        clk_put(ssi.ssr_fck);
+       clk_unprepare(ssi.ick);
        clk_put(ssi.ick);
 }
 
@@ -145,14 +149,21 @@ void dsp_clk_init(void)
        iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
        if (IS_ERR(iva2_clk))
                dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+       else
+               clk_prepare(iva2_clk);
 
        ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
        ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
        ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
 
-       if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+       if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) {
                dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
                                        ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+       } else {
+               clk_prepare(ssi.sst_fck);
+               clk_prepare(ssi.ssr_fck);
+               clk_prepare(ssi.ick);
+       }
 }
 
 /**
index 1dce36f..7ff0e6c 100644 (file)
@@ -63,11 +63,15 @@ int dsp_wdt_init(void)
        dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
 
        if (!IS_ERR(dsp_wdt.fclk)) {
+               clk_prepare(dsp_wdt.fclk);
+
                dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
                if (IS_ERR(dsp_wdt.iclk)) {
                        clk_put(dsp_wdt.fclk);
                        dsp_wdt.fclk = NULL;
                        ret = -EFAULT;
+               } else {
+                       clk_prepare(dsp_wdt.iclk);
                }
        } else
                ret = -EFAULT;
@@ -95,10 +99,14 @@ void dsp_wdt_exit(void)
        free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
        tasklet_kill(&dsp_wdt.wdt3_tasklet);
 
-       if (dsp_wdt.fclk)
+       if (dsp_wdt.fclk) {
+               clk_unprepare(dsp_wdt.fclk);
                clk_put(dsp_wdt.fclk);
-       if (dsp_wdt.iclk)
+       }
+       if (dsp_wdt.iclk) {
+               clk_unprepare(dsp_wdt.iclk);
                clk_put(dsp_wdt.iclk);
+       }
 
        dsp_wdt.fclk = NULL;
        dsp_wdt.iclk = NULL;
index 0331178..bf73ba2 100644 (file)
@@ -162,11 +162,9 @@ static struct vme_driver pio2_driver = {
 
 static int __init pio2_init(void)
 {
-       int retval = 0;
-
        if (bus_num == 0) {
                pr_err("No cards, skipping registration\n");
-               goto err_nocard;
+               return -ENODEV;
        }
 
        if (bus_num > PIO2_CARDS_MAX) {
@@ -176,15 +174,7 @@ static int __init pio2_init(void)
        }
 
        /* Register the PIO2 driver */
-       retval = vme_register_driver(&pio2_driver, bus_num);
-       if (retval != 0)
-               goto err_reg;
-
-       return retval;
-
-err_reg:
-err_nocard:
-       return retval;
+       return  vme_register_driver(&pio2_driver, bus_num);
 }
 
 static int pio2_match(struct vme_dev *vdev)
index 6b2ec39..806cbf7 100644 (file)
@@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject {
 } SRSNCapObject, *PSRSNCapObject;
 
 // BSS info(AP)
-#pragma pack(1)
 typedef struct tagKnownBSS {
     // BSS info
     BOOL            bActive;
index 5d8faf9..e0d2b07 100644 (file)
@@ -34,7 +34,6 @@
 #include "device.h"
 
 /*---------------------  Export Definitions -------------------------*/
-#pragma pack(1)
 typedef struct tagSINTData {
        BYTE byTSR0;
        BYTE byPkt0;
index 22710ce..ae6e2d2 100644 (file)
@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
 // Ioctl interface structure
 // Command structure
 //
-#pragma pack(1)
 typedef struct tagSCmdRequest {
        u8 name[16];
        void    *data;
        u16         wResult;
        u16     wCmdCode;
-} SCmdRequest, *PSCmdRequest;
+} __packed SCmdRequest, *PSCmdRequest;
 
 //
 // Scan
@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
 
     u8     ssid[SSID_MAXLEN + 2];
 
-} SCmdScan, *PSCmdScan;
+} __packed SCmdScan, *PSCmdScan;
 
 //
 // BSS Join
@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
     BOOL    bPSEnable;
     BOOL    bShareKeyAuth;
 
-} SCmdBSSJoin, *PSCmdBSSJoin;
+} __packed SCmdBSSJoin, *PSCmdBSSJoin;
 
 //
 // Zonetype Setting
@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
  BOOL       bWrite;
  WZONETYPE  ZoneType;
 
-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
 
 typedef struct tagSWPAResult {
          char  ifname[100];
@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
        u8 key_mgmt;
        u8 eap_type;
          BOOL authenticated;
-} SWPAResult, *PSWPAResult;
+} __packed SWPAResult, *PSWPAResult;
 
 typedef struct tagSCmdStartAP {
 
@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
     BOOL    bShareKeyAuth;
     u8      byBasicRate;
 
-} SCmdStartAP, *PSCmdStartAP;
+} __packed SCmdStartAP, *PSCmdStartAP;
 
 typedef struct tagSCmdSetWEP {
 
@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
     BOOL    bWepKeyAvailable[WEP_NKEYS];
     u32     auWepKeyLength[WEP_NKEYS];
 
-} SCmdSetWEP, *PSCmdSetWEP;
+} __packed SCmdSetWEP, *PSCmdSetWEP;
 
 typedef struct tagSBSSIDItem {
 
@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
     BOOL    bWEPOn;
     u32     uRSSI;
 
-} SBSSIDItem;
+} __packed SBSSIDItem;
 
 
 typedef struct tagSBSSIDList {
 
        u32                 uItem;
        SBSSIDItem      sBSSIDList[0];
-} SBSSIDList, *PSBSSIDList;
+} __packed SBSSIDList, *PSBSSIDList;
 
 
 typedef struct tagSNodeItem {
@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
     u32            uTxAttempts;
     u16            wFailureRatio;
 
-} SNodeItem;
+} __packed SNodeItem;
 
 
 typedef struct tagSNodeList {
@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
        u32                 uItem;
        SNodeItem       sNodeList[0];
 
-} SNodeList, *PSNodeList;
+} __packed SNodeList, *PSNodeList;
 
 
 typedef struct tagSCmdLinkStatus {
@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
     u32     uChannel;
     u32     uLinkRate;
 
-} SCmdLinkStatus, *PSCmdLinkStatus;
+} __packed SCmdLinkStatus, *PSCmdLinkStatus;
 
 //
 // 802.11 counter
@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
     u32 ReceivedFragmentCount;
     u32 MulticastReceivedFrameCount;
     u32 FCSErrorCount;
-} SDot11MIBCount, *PSDot11MIBCount;
+} __packed SDot11MIBCount, *PSDot11MIBCount;
 
 
 
@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
     u32   ullTxBroadcastBytes[2];
     u32   ullTxMulticastBytes[2];
     u32   ullTxDirectedBytes[2];
-} SStatMIBCount, *PSStatMIBCount;
+} __packed SStatMIBCount, *PSStatMIBCount;
 
 typedef struct tagSCmdValue {
 
     u32     dwValue;
 
-} SCmdValue,  *PSCmdValue;
+} __packed SCmdValue,  *PSCmdValue;
 
 //
 // hostapd & viawget ioctl related
@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
                        u8 ssid[32];
                } scan_req;
        } u;
-};
+} __packed;
 
 /*---------------------  Export Classes  ----------------------------*/
 
index 959c886..2522dde 100644 (file)
@@ -67,12 +67,11 @@ enum {
 
 
 
-#pragma pack(1)
 typedef struct viawget_wpa_header {
        u8 type;
        u16 req_ie_len;
        u16 resp_ie_len;
-} viawget_wpa_header;
+} __packed viawget_wpa_header;
 
 struct viawget_wpa_param {
        u32 cmd;
@@ -113,9 +112,8 @@ struct viawget_wpa_param {
                        u8 *buf;
                } scan_results;
        } u;
-};
+} __packed;
 
-#pragma pack(1)
 struct viawget_scan_result {
        u8 bssid[6];
        u8 ssid[32];
@@ -130,7 +128,7 @@ struct viawget_scan_result {
        int noise;
        int level;
        int maxrate;
-};
+} __packed;
 
 /*---------------------  Export Classes  ----------------------------*/
 
index 18c06a5..1d31eab 100644 (file)
@@ -638,8 +638,8 @@ int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 }
 
 
-int prism2_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type,
-                       int mbm)
+int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+                       enum nl80211_tx_power_setting type, int mbm)
 {
        struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
        wlandevice_t *wlandev = priv->wlandev;
@@ -665,7 +665,8 @@ exit:
        return err;
 }
 
-int prism2_get_tx_power(struct wiphy *wiphy, int *dbm)
+int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+                       int *dbm)
 {
        struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
        wlandevice_t *wlandev = priv->wlandev;
index 4efa9bc..89bfd85 100644 (file)
@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
        /* SSID */
        req->ssid.status = P80211ENUM_msgitem_status_data_ok;
        req->ssid.data.len = le16_to_cpu(item->ssid.len);
-       req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
+       req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
        memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
 
        /* supported rates */
index fb4a7c9..f2a73bd 100644 (file)
@@ -265,7 +265,7 @@ out_cleanup:
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
 {
-       int ret;
+       int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
@@ -286,10 +286,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
-               if (ret) {
-                       kfree(uncmem);
+               if (ret)
                        goto out;
-               }
        }
 
        /*
@@ -302,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        user_mem = kmap_atomic(page);
 
-       if (is_partial_io(bvec))
+       if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
-       else
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+       } else {
                uncmem = user_mem;
+       }
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem);
-               if (is_partial_io(bvec))
-                       kfree(uncmem);
+               if (!is_partial_io(bvec))
+                       kunmap_atomic(user_mem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
@@ -321,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);
 
-       kunmap_atomic(user_mem);
-       if (is_partial_io(bvec))
-                       kfree(uncmem);
+       if (!is_partial_io(bvec)) {
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+               uncmem = NULL;
+       }
 
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
@@ -332,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        if (unlikely(clen > max_zpage_size)) {
                zram_stat_inc(&zram->stats.bad_compress);
-               src = uncmem;
                clen = PAGE_SIZE;
+               src = NULL;
+               if (is_partial_io(bvec))
+                       src = uncmem;
        }
 
        handle = zs_malloc(zram->mem_pool, clen);
@@ -345,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        }
        cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               src = kmap_atomic(page);
        memcpy(cmem, src, clen);
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               kunmap_atomic(src);
 
        zs_unmap_object(zram->mem_pool, handle);
 
@@ -358,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);
 
-       return 0;
-
 out:
+       if (is_partial_io(bvec))
+               kfree(uncmem);
+
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
index 9ac4c15..ba6091b 100644 (file)
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                 * made generic here.
                 */
                if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
-                    iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
+                    iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
                        iscsit_free_cmd(cmd);
index 85140f7..7d4ec02 100644 (file)
@@ -212,7 +212,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
        unsigned char *buf;
        unsigned char *ptr;
-       sense_reason_t rc;
+       sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0;
        u16 tg_pt_id, rtpi;
index e35dbf8..8e0290b 100644 (file)
@@ -2053,7 +2053,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
        /* Used for APTPL metadata w/ UNREGISTER */
        unsigned char *pr_aptpl_buf = NULL;
        unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
-       sense_reason_t ret;
+       sense_reason_t ret = TCM_NO_SENSE;
        int pr_holder = 0, type;
 
        if (!se_sess || !se_lun) {
index c23c76c..bd587b7 100644 (file)
@@ -541,9 +541,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-       if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
-               transport_lun_remove_cmd(cmd);
-
        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
        if (remove)
@@ -1396,6 +1393,8 @@ static void target_complete_tmr_failure(struct work_struct *work)
 
        se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
        se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+       transport_cmd_check_stop_to_fabric(se_cmd);
 }
 
 /**
@@ -1688,6 +1687,7 @@ void target_execute_cmd(struct se_cmd *cmd)
        }
 
        cmd->t_state = TRANSPORT_PROCESSING;
+       cmd->transport_state |= CMD_T_ACTIVE;
        spin_unlock_irq(&cmd->t_state_lock);
 
        if (!target_handle_task_attr(cmd))
@@ -2597,6 +2597,16 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
         * SENSE KEY values from include/scsi/scsi.h
         */
        switch (reason) {
+       case TCM_NO_SENSE:
+               /* CURRENT ERROR */
+               buffer[0] = 0x70;
+               buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* Not Ready */
+               buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
+               /* NO ADDITIONAL SENSE INFORMATION */
+               buffer[SPC_ASC_KEY_OFFSET] = 0;
+               buffer[SPC_ASCQ_KEY_OFFSET] = 0;
+               break;
        case TCM_NON_EXISTENT_LUN:
                /* CURRENT ERROR */
                buffer[0] = 0x70;
@@ -2743,7 +2753,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
                /* ILLEGAL REQUEST */
                buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
-               buffer[SPC_ASC_KEY_OFFSET] = 0x80;
+               buffer[SPC_ASC_KEY_OFFSET] = 0x08;
                break;
        }
        /*
@@ -2804,6 +2814,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 
+       transport_lun_remove_cmd(cmd);
+
        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
                " ITT: 0x%08x\n", cmd->t_task_cdb[0],
                cmd->se_tfo->get_task_tag(cmd));
index 12d6fa2..6659dd3 100644 (file)
@@ -355,11 +355,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 
        tport = ft_tport_create(rdata->local_port);
        if (!tport)
-               return 0;       /* not a target for this local port */
+               goto not_target;        /* not a target for this local port */
 
        acl = ft_acl_get(tport->tpg, rdata);
        if (!acl)
-               return 0;
+               goto not_target;        /* no target for this remote */
 
        if (!rspp)
                goto fill;
@@ -396,12 +396,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 
        /*
         * OR in our service parameters with other provider (initiator), if any.
-        * TBD XXX - indicate RETRY capability?
         */
 fill:
        fcp_parm = ntohl(spp->spp_params);
+       fcp_parm &= ~FCP_SPPF_RETRY;
        spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
        return FC_SPP_RESP_ACK;
+
+not_target:
+       fcp_parm = ntohl(spp->spp_params);
+       fcp_parm &= ~FCP_SPPF_TARG_FCN;
+       spp->spp_params = htonl(fcp_parm);
+       return 0;
 }
 
 /**
index be6a373..79ff3a5 100644 (file)
@@ -441,6 +441,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
                return pty_get_pktmode(tty, (int __user *)arg);
        case TIOCSIG:    /* Send signal to other side of pty */
                return pty_signal(tty, (int) arg);
+       case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */
+               return -EINVAL;
        }
        return -ENOIOCTLCMD;
 }
index d085e3a..f932043 100644 (file)
@@ -300,6 +300,12 @@ static const struct serial8250_config uart_config[] = {
                                  UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
                .flags          = UART_CAP_FIFO,
        },
+       [PORT_BRCM_TRUMANAGE] = {
+               .name           = "TruManage",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1024,
+               .flags          = UART_CAP_HFIFO,
+       },
        [PORT_8250_CIR] = {
                .name           = "CIR port"
        }
@@ -1490,6 +1496,11 @@ void serial8250_tx_chars(struct uart_8250_port *up)
                port->icount.tx++;
                if (uart_circ_empty(xmit))
                        break;
+               if (up->capabilities & UART_CAP_HFIFO) {
+                       if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
+                           BOTH_EMPTY)
+                               break;
+               }
        } while (--count > 0);
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
index 3b4ea84..12caa12 100644 (file)
@@ -40,6 +40,7 @@ struct serial8250_config {
 #define UART_CAP_AFE   (1 << 11)       /* MCR-based hw flow control */
 #define UART_CAP_UUE   (1 << 12)       /* UART needs IER bit 6 set (Xscale) */
 #define UART_CAP_RTOIE (1 << 13)       /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO (1 << 14)       /* UART has a "hidden" FIFO */
 
 #define UART_BUG_QUOT  (1 << 0)        /* UART has buggy quot LSB */
 #define UART_BUG_TXEN  (1 << 1)        /* UART has buggy TX IIR status */
index 1d0dba2..096d2ef 100644 (file)
@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
        } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
                /* Clear the USR and write the LCR again. */
                (void)p->serial_in(p, UART_USR);
-               p->serial_out(p, d->last_lcr, UART_LCR);
+               p->serial_out(p, UART_LCR, d->last_lcr);
 
                return 1;
        }
index 26b9dc0..a27a98e 100644 (file)
@@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv,
        return setup_port(priv, port, 2, idx * 8, 0);
 }
 
+static int
+pci_brcm_trumanage_setup(struct serial_private *priv,
+                        const struct pciserial_board *board,
+                        struct uart_8250_port *port, int idx)
+{
+       int ret = pci_default_setup(priv, board, port, idx);
+
+       port->port.type = PORT_BRCM_TRUMANAGE;
+       port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+       return ret;
+}
+
 static int skip_tx_en_setup(struct serial_private *priv,
                        const struct pciserial_board *board,
                        struct uart_8250_port *port, int idx)
@@ -1301,9 +1313,10 @@ pci_wch_ch353_setup(struct serial_private *priv,
 #define PCI_VENDOR_ID_AGESTAR          0x5372
 #define PCI_DEVICE_ID_AGESTAR_9375     0x6872
 #define PCI_VENDOR_ID_ASIX             0x9710
-#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019
 #define PCI_DEVICE_ID_COMMTECH_4224PCIE        0x0020
 #define PCI_DEVICE_ID_COMMTECH_4228PCIE        0x0021
+#define PCI_DEVICE_ID_COMMTECH_4222PCIE        0x0022
+#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
 
 
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
@@ -1954,6 +1967,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .setup          = pci_xr17v35x_setup,
        },
        /*
+        * Broadcom TruManage (NetXtreme)
+        */
+       {
+               .vendor         = PCI_VENDOR_ID_BROADCOM,
+               .device         = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_brcm_trumanage_setup,
+       },
+
+       /*
         * Default "match everything" terminator entry
         */
        {
@@ -2148,6 +2172,7 @@ enum pci_board_num_t {
        pbn_ce4100_1_115200,
        pbn_omegapci,
        pbn_NETMOS9900_2s_115200,
+       pbn_brcm_trumanage,
 };
 
 /*
@@ -2246,7 +2271,7 @@ static struct pciserial_board pci_boards[] = {
 
        [pbn_b0_8_1152000_200] = {
                .flags          = FL_BASE0,
-               .num_ports      = 2,
+               .num_ports      = 8,
                .base_baud      = 1152000,
                .uart_offset    = 0x200,
        },
@@ -2892,6 +2917,12 @@ static struct pciserial_board pci_boards[] = {
                .num_ports      = 2,
                .base_baud      = 115200,
        },
+       [pbn_brcm_trumanage] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 1,
+               .reg_shift      = 2,
+               .base_baud      = 115200,
+       },
 };
 
 static const struct pci_device_id blacklist[] = {
@@ -4471,6 +4502,13 @@ static struct pci_device_id serial_pci_tbl[] = {
                pbn_omegapci },
 
        /*
+        * Broadcom TruManage
+        */
+       {       PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_brcm_trumanage },
+
+       /*
         * AgeStar as-prs2-009
         */
        {       PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375,
index 675d94a..8cb6d8d 100644 (file)
@@ -637,6 +637,7 @@ static void ifx_port_shutdown(struct tty_port *port)
 
        clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
        mrdy_set_low(ifx_dev);
+       del_timer(&ifx_dev->spi_timer);
        clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
        tasklet_kill(&ifx_dev->io_work_tasklet);
 }
@@ -810,7 +811,8 @@ static void ifx_spi_io(unsigned long data)
                ifx_dev->spi_xfer.cs_change = 0;
                ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
                /* ifx_dev->spi_xfer.speed_hz = 390625; */
-               ifx_dev->spi_xfer.bits_per_word = spi_bpw;
+               ifx_dev->spi_xfer.bits_per_word =
+                       ifx_dev->spi_dev->bits_per_word;
 
                ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
                ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
index 6db23b0..e55615e 100644 (file)
@@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
        struct circ_buf *xmit = &s->port.state->xmit;
 
        if (auart_dma_enabled(s)) {
-               int i = 0;
+               u32 i = 0;
                int size;
                void *buffer = s->tx_dma_buf;
 
@@ -412,10 +412,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
 
        u32 ctrl = readl(u->membase + AUART_CTRL2);
 
-       ctrl &= ~AUART_CTRL2_RTSEN;
+       ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);
        if (mctrl & TIOCM_RTS) {
                if (tty_port_cts_enabled(&u->state->port))
                        ctrl |= AUART_CTRL2_RTSEN;
+               else
+                       ctrl |= AUART_CTRL2_RTS;
        }
 
        s->ctrl = mctrl;
index 12e5249..e514b3a 100644 (file)
@@ -1006,7 +1006,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
 
        ucon &= ucon_mask;
        wr_regl(port, S3C2410_UCON,  ucon | cfg->ucon);
-       wr_regl(port, S3C2410_ULCON, cfg->ulcon);
 
        /* reset both fifos */
        wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
index 8fd1814..d5ed9f6 100644 (file)
@@ -604,7 +604,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
        vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
 
        vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
-       if (vt8500_port->clk) {
+       if (!IS_ERR(vt8500_port->clk)) {
                vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk);
        } else {
                /* use the default of 24Mhz if not specified and warn */
index 4c90b51..640ae6c 100644 (file)
@@ -37,6 +37,7 @@ config USB_ARCH_HAS_EHCI
        default y if ARCH_W90X900
        default y if ARCH_AT91
        default y if ARCH_MXC
+       default y if ARCH_MXS
        default y if ARCH_OMAP3
        default y if ARCH_CNS3XXX
        default y if ARCH_VT8500
index caecad9..8e9d312 100644 (file)
@@ -70,6 +70,9 @@ static int host_start(struct ci13xxx *ci)
        else
                ci->hcd = hcd;
 
+       if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
+               hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
        return ret;
 }
 
index 8d809a8..2d92cce 100644 (file)
@@ -1602,6 +1602,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
        .driver_info = NO_UNION_NORMAL,
        },
+       { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
+       .driver_info = NO_UNION_NORMAL,
+       },
        { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
        .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
        },
index a815fd2..957ed2c 100644 (file)
@@ -877,6 +877,60 @@ static int hub_hub_status(struct usb_hub *hub,
        return ret;
 }
 
+static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+                       unsigned int link_status)
+{
+       return set_port_feature(hub->hdev,
+                       port1 | (link_status << 3),
+                       USB_PORT_FEAT_LINK_STATE);
+}
+
+/*
+ * If USB 3.0 ports are placed into the Disabled state, they will no longer
+ * detect any device connects or disconnects.  This is generally not what the
+ * USB core wants, since it expects a disabled port to produce a port status
+ * change event when a new device connects.
+ *
+ * Instead, set the link state to Disabled, wait for the link to settle into
+ * that state, clear any change bits, and then put the port into the RxDetect
+ * state.
+ */
+static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+{
+       int ret;
+       int total_time;
+       u16 portchange, portstatus;
+
+       if (!hub_is_superspeed(hub->hdev))
+               return -EINVAL;
+
+       ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+       if (ret) {
+               dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+                               port1, ret);
+               return ret;
+       }
+
+       /* Wait for the link to enter the disabled state. */
+       for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+               ret = hub_port_status(hub, port1, &portstatus, &portchange);
+               if (ret < 0)
+                       return ret;
+
+               if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                               USB_SS_PORT_LS_SS_DISABLED)
+                       break;
+               if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+                       break;
+               msleep(HUB_DEBOUNCE_STEP);
+       }
+       if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+               dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
+                               port1, total_time);
+
+       return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+}
+
 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
 {
        struct usb_device *hdev = hub->hdev;
@@ -885,8 +939,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
        if (hub->ports[port1 - 1]->child && set_state)
                usb_set_device_state(hub->ports[port1 - 1]->child,
                                USB_STATE_NOTATTACHED);
-       if (!hub->error && !hub_is_superspeed(hub->hdev))
-               ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
+       if (!hub->error) {
+               if (hub_is_superspeed(hub->hdev))
+                       ret = hub_usb3_port_disable(hub, port1);
+               else
+                       ret = clear_port_feature(hdev, port1,
+                                       USB_PORT_FEAT_ENABLE);
+       }
        if (ret)
                dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
                                port1, ret);
@@ -2440,7 +2499,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 #define HUB_SHORT_RESET_TIME   10
 #define HUB_BH_RESET_TIME      50
 #define HUB_LONG_RESET_TIME    200
-#define HUB_RESET_TIMEOUT      500
+#define HUB_RESET_TIMEOUT      800
 
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
@@ -2475,6 +2534,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                if (ret < 0)
                        return ret;
 
+               /* The port state is unknown until the reset completes. */
+               if ((portstatus & USB_PORT_STAT_RESET))
+                       goto delay;
+
                /*
                 * Some buggy devices require a warm reset to be issued even
                 * when the port appears not to be connected.
@@ -2520,11 +2583,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                        if ((portchange & USB_PORT_STAT_C_CONNECTION))
                                return -ENOTCONN;
 
-                       /* if we`ve finished resetting, then break out of
-                        * the loop
-                        */
-                       if (!(portstatus & USB_PORT_STAT_RESET) &&
-                           (portstatus & USB_PORT_STAT_ENABLE)) {
+                       if ((portstatus & USB_PORT_STAT_ENABLE)) {
                                if (hub_is_wusb(hub))
                                        udev->speed = USB_SPEED_WIRELESS;
                                else if (hub_is_superspeed(hub->hdev))
@@ -2538,10 +2597,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                                return 0;
                        }
                } else {
-                       if (portchange & USB_PORT_STAT_C_BH_RESET)
-                               return 0;
+                       if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+                                       hub_port_warm_reset_required(hub,
+                                               portstatus))
+                               return -ENOTCONN;
+
+                       return 0;
                }
 
+delay:
                /* switch to the long delay after two short delay failures */
                if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
                        delay = HUB_LONG_RESET_TIME;
@@ -2565,14 +2629,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
                        msleep(10 + 40);
                        update_devnum(udev, 0);
                        hcd = bus_to_hcd(udev->bus);
-                       if (hcd->driver->reset_device) {
-                               *status = hcd->driver->reset_device(hcd, udev);
-                               if (*status < 0) {
-                                       dev_err(&udev->dev, "Cannot reset "
-                                                       "HCD device state\n");
-                                       break;
-                               }
-                       }
+                       /* The xHC may think the device is already reset,
+                        * so ignore the status.
+                        */
+                       if (hcd->driver->reset_device)
+                               hcd->driver->reset_device(hcd, udev);
                }
                /* FALL THROUGH */
        case -ENOTCONN:
@@ -2580,16 +2641,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
                clear_port_feature(hub->hdev,
                                port1, USB_PORT_FEAT_C_RESET);
                /* FIXME need disconnect() for NOTATTACHED device */
-               if (warm) {
+               if (hub_is_superspeed(hub->hdev)) {
                        clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_BH_PORT_RESET);
                        clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_PORT_LINK_STATE);
-               } else {
+               }
+               if (!warm)
                        usb_set_device_state(udev, *status
                                        ? USB_STATE_NOTATTACHED
                                        : USB_STATE_DEFAULT);
-               }
                break;
        }
 }
@@ -2939,7 +3000,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 static int finish_port_resume(struct usb_device *udev)
 {
        int     status = 0;
-       u16     devstatus;
+       u16     devstatus = 0;
 
        /* caller owns the udev device lock */
        dev_dbg(&udev->dev, "%s\n",
@@ -2984,7 +3045,13 @@ static int finish_port_resume(struct usb_device *udev)
        if (status) {
                dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
                                status);
-       } else if (udev->actconfig) {
+       /*
+        * There are a few quirky devices which violate the standard
+        * by claiming to have remote wakeup enabled after a reset,
+        * which crash if the feature is cleared, hence check for
+        * udev->reset_resume
+        */
+       } else if (udev->actconfig && !udev->reset_resume) {
                le16_to_cpus(&devstatus);
                if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
                        status = usb_control_msg(udev,
@@ -4638,9 +4705,14 @@ static void hub_events(void)
                         * SS.Inactive state.
                         */
                        if (hub_port_warm_reset_required(hub, portstatus)) {
+                               int status;
+
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
-                               hub_port_reset(hub, i, NULL,
+                               status = hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
+                               if (status < 0)
+                                       hub_port_disable(hub, i, 1);
+                               connect_change = 0;
                        }
 
                        if (connect_change)
index fdefd9c..3113c1d 100644 (file)
@@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Microsoft LifeCam-VX700 v2.0 */
+       { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Quickcam Fusion */
        { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 92604b4..5945aad 100644 (file)
@@ -56,7 +56,7 @@
 #define dump_register(nm)                              \
 {                                                      \
        .name   = __stringify(nm),                      \
-       .offset = DWC3_ ##nm,                           \
+       .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
 }
 
 static const struct debugfs_reg32 dwc3_regs[] = {
index 2e43b33..2fdd767 100644 (file)
@@ -1605,6 +1605,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
 
                if (epnum == 0 || epnum == 1) {
                        dep->endpoint.maxpacket = 512;
+                       dep->endpoint.maxburst = 1;
                        dep->endpoint.ops = &dwc3_gadget_ep0_ops;
                        if (!epnum)
                                dwc->gadget.ep0 = &dep->endpoint;
index fc0ec5e..d9f6b93 100644 (file)
@@ -3231,7 +3231,7 @@ static int udc_pci_probe(
        }
 
        if (!pdev->irq) {
-               dev_err(&dev->pdev->dev, "irq not set\n");
+               dev_err(&pdev->dev, "irq not set\n");
                kfree(dev);
                dev = NULL;
                retval = -ENODEV;
@@ -3250,7 +3250,7 @@ static int udc_pci_probe(
        dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
 
        if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
-               dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+               dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
                kfree(dev);
                dev = NULL;
                retval = -EBUSY;
index 95d584d..8cf0c0f 100644 (file)
@@ -130,10 +130,7 @@ static const char ep0name[] = "ep0";
 static const char *const ep_name[] = {
        ep0name,                                /* everyone has ep0 */
 
-       /* act like a net2280: high speed, six configurable endpoints */
-       "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
-
-       /* or like pxa250: fifteen fixed function endpoints */
+       /* act like a pxa250: fifteen fixed function endpoints */
        "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
        "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
        "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
@@ -141,6 +138,10 @@ static const char *const ep_name[] = {
 
        /* or like sa1100: two fixed function endpoints */
        "ep1out-bulk", "ep2in-bulk",
+
+       /* and now some generic EPs so we have enough in multi config */
+       "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
+       "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
 };
 #define DUMMY_ENDPOINTS        ARRAY_SIZE(ep_name)
 
index 4a6961c..8c2f251 100644 (file)
@@ -1153,15 +1153,15 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
                                        pr_err("%s: unmapped value: %lu\n", opts, value);
                                        return -EINVAL;
                                }
-                       }
-                       else if (!memcmp(opts, "gid", 3))
+                       } else if (!memcmp(opts, "gid", 3)) {
                                data->perms.gid = make_kgid(current_user_ns(), value);
                                if (!gid_valid(data->perms.gid)) {
                                        pr_err("%s: unmapped value: %lu\n", opts, value);
                                        return -EINVAL;
                                }
-                       else
+                       } else {
                                goto invalid;
+                       }
                        break;
 
                default:
index 1b0f086..d3bd7b0 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#include <mach/hardware.h>
-
 static struct clk *mxc_ahb_clk;
 static struct clk *mxc_per_clk;
 static struct clk *mxc_ipg_clk;
 
 /* workaround ENGcm09152 for i.MX35 */
-#define USBPHYCTRL_OTGBASE_OFFSET      0x608
+#define MX35_USBPHYCTRL_OFFSET         0x600
+#define USBPHYCTRL_OTGBASE_OFFSET      0x8
 #define USBPHYCTRL_EVDO                        (1 << 23)
 
 int fsl_udc_clk_init(struct platform_device *pdev)
@@ -59,7 +58,7 @@ int fsl_udc_clk_init(struct platform_device *pdev)
        clk_prepare_enable(mxc_per_clk);
 
        /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
-       if (!cpu_is_mx51()) {
+       if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
                freq = clk_get_rate(mxc_per_clk);
                if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
                    (freq < 59999000 || freq > 60001000)) {
@@ -79,27 +78,40 @@ eclkrate:
        return ret;
 }
 
-void fsl_udc_clk_finalize(struct platform_device *pdev)
+int fsl_udc_clk_finalize(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-       if (cpu_is_mx35()) {
-               unsigned int v;
+       int ret = 0;
 
-               /* workaround ENGcm09152 for i.MX35 */
-               if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
-                       v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
-                                       USBPHYCTRL_OTGBASE_OFFSET));
-                       writel(v | USBPHYCTRL_EVDO,
-                               MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
-                                       USBPHYCTRL_OTGBASE_OFFSET));
+       /* workaround ENGcm09152 for i.MX35 */
+       if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
+               unsigned int v;
+               struct resource *res = platform_get_resource
+                       (pdev, IORESOURCE_MEM, 0);
+               void __iomem *phy_regs = ioremap(res->start +
+                                               MX35_USBPHYCTRL_OFFSET, 512);
+               if (!phy_regs) {
+                       dev_err(&pdev->dev, "ioremap for phy address fails\n");
+                       ret = -EINVAL;
+                       goto ioremap_err;
                }
+
+               v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
+               writel(v | USBPHYCTRL_EVDO,
+                       phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
+
+               iounmap(phy_regs);
        }
 
+
+ioremap_err:
        /* ULPI transceivers don't need usbpll */
        if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
                clk_disable_unprepare(mxc_per_clk);
                mxc_per_clk = NULL;
        }
+
+       return ret;
 }
 
 void fsl_udc_clk_release(void)
index c19f7f1..667275c 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/fsl_devices.h>
 #include <linux/dmapool.h>
 #include <linux/delay.h>
+#include <linux/of_device.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -2438,11 +2439,6 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
        unsigned int i;
        u32 dccparams;
 
-       if (strcmp(pdev->name, driver_name)) {
-               VDBG("Wrong device");
-               return -ENODEV;
-       }
-
        udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
        if (udc_controller == NULL) {
                ERR("malloc udc failed\n");
@@ -2547,7 +2543,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
                dr_controller_setup(udc_controller);
        }
 
-       fsl_udc_clk_finalize(pdev);
+       ret = fsl_udc_clk_finalize(pdev);
+       if (ret)
+               goto err_free_irq;
 
        /* Setup gadget structure */
        udc_controller->gadget.ops = &fsl_gadget_ops;
@@ -2756,22 +2754,32 @@ static int fsl_udc_otg_resume(struct device *dev)
 
        return fsl_udc_resume(NULL);
 }
-
 /*-------------------------------------------------------------------------
        Register entry point for the peripheral controller driver
 --------------------------------------------------------------------------*/
-
+static const struct platform_device_id fsl_udc_devtype[] = {
+       {
+               .name = "imx-udc-mx27",
+       }, {
+               .name = "imx-udc-mx51",
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
 static struct platform_driver udc_driver = {
-       .remove  = __exit_p(fsl_udc_remove),
+       .remove         = __exit_p(fsl_udc_remove),
+       /* Just for FSL i.mx SoC currently */
+       .id_table       = fsl_udc_devtype,
        /* these suspend and resume are not usb suspend and resume */
-       .suspend = fsl_udc_suspend,
-       .resume  = fsl_udc_resume,
-       .driver  = {
-               .name = (char *)driver_name,
-               .owner = THIS_MODULE,
-               /* udc suspend/resume called from OTG driver */
-               .suspend = fsl_udc_otg_suspend,
-               .resume  = fsl_udc_otg_resume,
+       .suspend        = fsl_udc_suspend,
+       .resume         = fsl_udc_resume,
+       .driver         = {
+                       .name = (char *)driver_name,
+                       .owner = THIS_MODULE,
+                       /* udc suspend/resume called from OTG driver */
+                       .suspend = fsl_udc_otg_suspend,
+                       .resume  = fsl_udc_otg_resume,
        },
 };
 
index f61a967..c6703bb 100644 (file)
@@ -592,15 +592,16 @@ static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
 struct platform_device;
 #ifdef CONFIG_ARCH_MXC
 int fsl_udc_clk_init(struct platform_device *pdev);
-void fsl_udc_clk_finalize(struct platform_device *pdev);
+int fsl_udc_clk_finalize(struct platform_device *pdev);
 void fsl_udc_clk_release(void);
 #else
 static inline int fsl_udc_clk_init(struct platform_device *pdev)
 {
        return 0;
 }
-static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
 {
+       return 0;
 }
 static inline void fsl_udc_clk_release(void)
 {
index 379aac7..6e8b127 100644 (file)
@@ -1012,7 +1012,7 @@ static void udc_clock_enable(struct mv_udc *udc)
        unsigned int i;
 
        for (i = 0; i < udc->clknum; i++)
-               clk_enable(udc->clk[i]);
+               clk_prepare_enable(udc->clk[i]);
 }
 
 static void udc_clock_disable(struct mv_udc *udc)
@@ -1020,7 +1020,7 @@ static void udc_clock_disable(struct mv_udc *udc)
        unsigned int i;
 
        for (i = 0; i < udc->clknum; i++)
-               clk_disable(udc->clk[i]);
+               clk_disable_unprepare(udc->clk[i]);
 }
 
 static void udc_stop(struct mv_udc *udc)
index 141971d..439c3f9 100644 (file)
@@ -3477,12 +3477,11 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
 /**
  * s3c_hsotg_release - release callback for hsotg device
  * @dev: Device to for which release is called
+ *
+ * Nothing to do as the resource is allocated using devm_ API.
  */
 static void s3c_hsotg_release(struct device *dev)
 {
-       struct s3c_hsotg *hsotg = dev_get_drvdata(dev);
-
-       kfree(hsotg);
 }
 
 /**
index 4f7f76f..7cacd6a 100644 (file)
@@ -1794,9 +1794,10 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
        tpg->tpg_nexus = NULL;
 
        kfree(tv_nexus);
+       ret = 0;
 out:
        mutex_unlock(&tpg->tpg_mutex);
-       return 0;
+       return ret;
 }
 
 static ssize_t tcm_usbg_tpg_store_nexus(
index d0f9548..598dcc1 100644 (file)
@@ -887,7 +887,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
        pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
                        port->port_num, tty, file);
 
-       wake_up_interruptible(&port->port.close_wait);
+       wake_up(&port->port.close_wait);
 exit:
        spin_unlock_irq(&port->port_lock);
 }
index d6bb128..3a21c5d 100644 (file)
@@ -148,7 +148,7 @@ config USB_EHCI_FSL
          Variation of ARC USB block used in some Freescale chips.
 
 config USB_EHCI_MXC
-       bool "Support for Freescale i.MX on-chip EHCI USB controller"
+       tristate "Support for Freescale i.MX on-chip EHCI USB controller"
        depends on USB_EHCI_HCD && ARCH_MXC
        select USB_EHCI_ROOT_HUB_TT
        ---help---
index 1eb4c30..001fbff 100644 (file)
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI)             += pci-quirks.o
 obj-$(CONFIG_USB_EHCI_HCD)     += ehci-hcd.o
 obj-$(CONFIG_USB_EHCI_PCI)     += ehci-pci.o
 obj-$(CONFIG_USB_EHCI_HCD_PLATFORM)    += ehci-platform.o
+obj-$(CONFIG_USB_EHCI_MXC)     += ehci-mxc.o
 
 obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
 obj-$(CONFIG_USB_ISP116X_HCD)  += isp116x-hcd.o
index fd9b542..d81d2fc 100644 (file)
@@ -230,7 +230,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 
        switch (phy_mode) {
        case FSL_USB2_PHY_ULPI:
-               if (pdata->controller_ver) {
+               if (pdata->have_sysif_regs && pdata->controller_ver) {
                        /* controller version 1.6 or above */
                        setbits32(non_ehci + FSL_SOC_USB_CTRL,
                                        ULPI_PHY_CLK_SEL);
@@ -251,7 +251,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
                portsc |= PORT_PTS_PTW;
                /* fall through */
        case FSL_USB2_PHY_UTMI:
-               if (pdata->controller_ver) {
+               if (pdata->have_sysif_regs && pdata->controller_ver) {
                        /* controller version 1.6 or above */
                        setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
                        mdelay(FSL_UTMI_PHY_DLY);  /* Delay for UTMI PHY CLK to
@@ -267,7 +267,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
                break;
        }
 
-       if (pdata->controller_ver && (phy_mode == FSL_USB2_PHY_ULPI)) {
+       if (pdata->have_sysif_regs && pdata->controller_ver &&
+           (phy_mode == FSL_USB2_PHY_ULPI)) {
                /* check PHY_CLK_VALID to get phy clk valid */
                if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
                                PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
@@ -278,7 +279,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 
        ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
 
-       if (phy_mode != FSL_USB2_PHY_ULPI)
+       if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
                setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
 
        return 0;
index c97503b..09537b2 100644 (file)
@@ -74,10 +74,6 @@ static const char    hcd_name [] = "ehci_hcd";
 #undef VERBOSE_DEBUG
 #undef EHCI_URB_TRACE
 
-#ifdef DEBUG
-#define EHCI_STATS
-#endif
-
 /* magic numbers that can affect system performance */
 #define        EHCI_TUNE_CERR          3       /* 0-3 qtd retries; 0 == don't stop */
 #define        EHCI_TUNE_RL_HS         4       /* nak throttle; see 4.9 */
@@ -1250,11 +1246,6 @@ MODULE_LICENSE ("GPL");
 #define        PLATFORM_DRIVER         ehci_fsl_driver
 #endif
 
-#ifdef CONFIG_USB_EHCI_MXC
-#include "ehci-mxc.c"
-#define PLATFORM_DRIVER                ehci_mxc_driver
-#endif
-
 #ifdef CONFIG_USB_EHCI_SH
 #include "ehci-sh.c"
 #define PLATFORM_DRIVER                ehci_hcd_sh_driver
@@ -1352,7 +1343,8 @@ MODULE_LICENSE ("GPL");
 
 #if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
        !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
-       !defined(CONFIG_USB_CHIPIDEA_HOST) && \
+       !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
+       !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
        !defined(PLATFORM_DRIVER) && \
        !defined(PS3_SYSTEM_BUS_DRIVER) && \
        !defined(OF_PLATFORM_DRIVER) && \
index f7bfc0b..6c56297 100644 (file)
@@ -43,7 +43,7 @@ static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
        unsigned int i;
 
        for (i = 0; i < ehci_mv->clknum; i++)
-               clk_enable(ehci_mv->clk[i]);
+               clk_prepare_enable(ehci_mv->clk[i]);
 }
 
 static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
@@ -51,7 +51,7 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
        unsigned int i;
 
        for (i = 0; i < ehci_mv->clknum; i++)
-               clk_disable(ehci_mv->clk[i]);
+               clk_disable_unprepare(ehci_mv->clk[i]);
 }
 
 static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
index ec7f5d2..dedb80b 100644 (file)
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
 
 #include <linux/platform_data/usb-ehci-mxc.h>
 
 #include <asm/mach-types.h>
 
+#include "ehci.h"
+
+#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
+
+static const char hcd_name[] = "ehci-mxc";
+
 #define ULPI_VIEWPORT_OFFSET   0x170
 
 struct ehci_mxc_priv {
        struct clk *usbclk, *ahbclk, *phyclk;
-       struct usb_hcd *hcd;
 };
 
-/* called during probe() after chip reset completes */
-static int ehci_mxc_setup(struct usb_hcd *hcd)
-{
-       hcd->has_tt = 1;
-
-       return ehci_setup(hcd);
-}
+static struct hc_driver __read_mostly ehci_mxc_hc_driver;
 
-static const struct hc_driver ehci_mxc_hc_driver = {
-       .description = hcd_name,
-       .product_desc = "Freescale On-Chip EHCI Host Controller",
-       .hcd_priv_size = sizeof(struct ehci_hcd),
-
-       /*
-        * generic hardware linkage
-        */
-       .irq = ehci_irq,
-       .flags = HCD_USB2 | HCD_MEMORY,
-
-       /*
-        * basic lifecycle operations
-        */
-       .reset = ehci_mxc_setup,
-       .start = ehci_run,
-       .stop = ehci_stop,
-       .shutdown = ehci_shutdown,
-
-       /*
-        * managing i/o requests and associated device resources
-        */
-       .urb_enqueue = ehci_urb_enqueue,
-       .urb_dequeue = ehci_urb_dequeue,
-       .endpoint_disable = ehci_endpoint_disable,
-       .endpoint_reset = ehci_endpoint_reset,
-
-       /*
-        * scheduling support
-        */
-       .get_frame_number = ehci_get_frame,
-
-       /*
-        * root hub support
-        */
-       .hub_status_data = ehci_hub_status_data,
-       .hub_control = ehci_hub_control,
-       .bus_suspend = ehci_bus_suspend,
-       .bus_resume = ehci_bus_resume,
-       .relinquish_port = ehci_relinquish_port,
-       .port_handed_over = ehci_port_handed_over,
-
-       .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+static const struct ehci_driver_overrides ehci_mxc_overrides __initdata = {
+       .extra_priv_size =      sizeof(struct ehci_mxc_priv),
 };
 
 static int ehci_mxc_drv_probe(struct platform_device *pdev)
@@ -112,12 +75,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
        if (!hcd)
                return -ENOMEM;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               ret = -ENOMEM;
-               goto err_alloc;
-       }
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Found HC with no register addr. Check setup!\n");
@@ -135,6 +92,10 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                goto err_alloc;
        }
 
+       hcd->has_tt = 1;
+       ehci = hcd_to_ehci(hcd);
+       priv = (struct ehci_mxc_priv *) ehci->priv;
+
        /* enable clocks */
        priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(priv->usbclk)) {
@@ -169,8 +130,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                mdelay(10);
        }
 
-       ehci = hcd_to_ehci(hcd);
-
        /* EHCI registers start at offset 0x100 */
        ehci->caps = hcd->regs + 0x100;
        ehci->regs = hcd->regs + 0x100 +
@@ -198,8 +157,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                }
        }
 
-       priv->hcd = hcd;
-       platform_set_drvdata(pdev, priv);
+       platform_set_drvdata(pdev, hcd);
 
        ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (ret)
@@ -244,8 +202,11 @@ err_alloc:
 static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
 {
        struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
-       struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
-       struct usb_hcd *hcd = priv->hcd;
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+       struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
+
+       usb_remove_hcd(hcd);
 
        if (pdata && pdata->exit)
                pdata->exit(pdev);
@@ -253,23 +214,20 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
        if (pdata->otg)
                usb_phy_shutdown(pdata->otg);
 
-       usb_remove_hcd(hcd);
-       usb_put_hcd(hcd);
-       platform_set_drvdata(pdev, NULL);
-
        clk_disable_unprepare(priv->usbclk);
        clk_disable_unprepare(priv->ahbclk);
 
        if (priv->phyclk)
                clk_disable_unprepare(priv->phyclk);
 
+       usb_put_hcd(hcd);
+       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
 static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
 {
-       struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
-       struct usb_hcd *hcd = priv->hcd;
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
@@ -279,9 +237,31 @@ MODULE_ALIAS("platform:mxc-ehci");
 
 static struct platform_driver ehci_mxc_driver = {
        .probe = ehci_mxc_drv_probe,
-       .remove = __exit_p(ehci_mxc_drv_remove),
+       .remove = ehci_mxc_drv_remove,
        .shutdown = ehci_mxc_drv_shutdown,
        .driver = {
                   .name = "mxc-ehci",
        },
 };
+
+static int __init ehci_mxc_init(void)
+{
+       if (usb_disabled())
+               return -ENODEV;
+
+       pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+       ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
+       return platform_driver_register(&ehci_mxc_driver);
+}
+module_init(ehci_mxc_init);
+
+static void __exit ehci_mxc_cleanup(void)
+{
+       platform_driver_unregister(&ehci_mxc_driver);
+}
+module_exit(ehci_mxc_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_LICENSE("GPL");
index dabb204..170b939 100644 (file)
@@ -200,6 +200,26 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                break;
        }
 
+       /* optional debug port, normally in the first BAR */
+       temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
+       if (temp) {
+               pci_read_config_dword(pdev, temp, &temp);
+               temp >>= 16;
+               if (((temp >> 13) & 7) == 1) {
+                       u32 hcs_params = ehci_readl(ehci,
+                                                   &ehci->caps->hcs_params);
+
+                       temp &= 0x1fff;
+                       ehci->debug = hcd->regs + temp;
+                       temp = ehci_readl(ehci, &ehci->debug->control);
+                       ehci_info(ehci, "debug port %d%s\n",
+                                 HCS_DEBUG_PORT(hcs_params),
+                                 (temp & DBGP_ENABLED) ? " IN USE" : "");
+                       if (!(temp & DBGP_ENABLED))
+                               ehci->debug = NULL;
+               }
+       }
+
        retval = ehci_setup(hcd);
        if (retval)
                return retval;
@@ -228,25 +248,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                break;
        }
 
-       /* optional debug port, normally in the first BAR */
-       temp = pci_find_capability(pdev, 0x0a);
-       if (temp) {
-               pci_read_config_dword(pdev, temp, &temp);
-               temp >>= 16;
-               if ((temp & (3 << 13)) == (1 << 13)) {
-                       temp &= 0x1fff;
-                       ehci->debug = hcd->regs + temp;
-                       temp = ehci_readl(ehci, &ehci->debug->control);
-                       ehci_info(ehci, "debug port %d%s\n",
-                               HCS_DEBUG_PORT(ehci->hcs_params),
-                               (temp & DBGP_ENABLED)
-                                       ? " IN USE"
-                                       : "");
-                       if (!(temp & DBGP_ENABLED))
-                               ehci->debug = NULL;
-               }
-       }
-
        /* at least the Genesys GL880S needs fixup here */
        temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
        temp &= 0x0f;
index 9dadc71..36c3a82 100644 (file)
@@ -38,6 +38,10 @@ typedef __u16 __bitwise __hc16;
 #endif
 
 /* statistics can be kept for tuning/monitoring */
+#ifdef DEBUG
+#define EHCI_STATS
+#endif
+
 struct ehci_stats {
        /* irq usage */
        unsigned long           normal;
@@ -221,6 +225,9 @@ struct ehci_hcd {                   /* one per controller */
 #ifdef DEBUG
        struct dentry           *debug_dir;
 #endif
+
+       /* platform-specific data -- must come last */
+       unsigned long           priv[0] __aligned(sizeof(s64));
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
index 5105127..11e0b79 100644 (file)
@@ -142,6 +142,9 @@ static int usb_get_ver_info(struct device_node *np)
                        return ver;
        }
 
+       if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+               return FSL_USB_VER_OLD;
+
        if (of_device_is_compatible(np, "fsl-usb2-mph")) {
                if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
                        ver = FSL_USB_VER_1_6;
index bd6a744..f0ebe8e 100644 (file)
@@ -58,6 +58,7 @@
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
 #include <linux/dma-mapping.h>
+#include <linux/module.h>
 
 #include "imx21-hcd.h"
 
index d370245..5e3a6de 100644 (file)
@@ -128,7 +128,8 @@ static void tmio_start_hc(struct platform_device *dev)
        tmio_iowrite8(2, tmio->ccr + CCR_INTC);
 
        dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
-                       tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+                       tmio_ioread8(tmio->ccr + CCR_REVID),
+                       (u64) hcd->rsrc_start, hcd->irq);
 }
 
 static int ohci_tmio_start(struct usb_hcd *hcd)
index 4b9e9ab..4f64d24 100644 (file)
@@ -447,6 +447,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
                return IRQ_NONE;
        uhci_writew(uhci, status, USBSTS);              /* Clear it */
 
+       spin_lock(&uhci->lock);
+       if (unlikely(!uhci->is_initialized))    /* not yet configured */
+               goto done;
+
        if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
                if (status & USBSTS_HSE)
                        dev_err(uhci_dev(uhci), "host system error, "
@@ -455,7 +459,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
                        dev_err(uhci_dev(uhci), "host controller process "
                                        "error, something bad happened!\n");
                if (status & USBSTS_HCH) {
-                       spin_lock(&uhci->lock);
                        if (uhci->rh_state >= UHCI_RH_RUNNING) {
                                dev_err(uhci_dev(uhci),
                                        "host controller halted, "
@@ -473,15 +476,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
                                 * pending unlinks */
                                mod_timer(&hcd->rh_timer, jiffies);
                        }
-                       spin_unlock(&uhci->lock);
                }
        }
 
-       if (status & USBSTS_RD)
+       if (status & USBSTS_RD) {
+               spin_unlock(&uhci->lock);
                usb_hcd_poll_rh_status(hcd);
-       else {
-               spin_lock(&uhci->lock);
+       } else {
                uhci_scan_schedule(uhci);
+ done:
                spin_unlock(&uhci->lock);
        }
 
@@ -662,9 +665,9 @@ static int uhci_start(struct usb_hcd *hcd)
         */
        mb();
 
+       spin_lock_irq(&uhci->lock);
        configure_hc(uhci);
        uhci->is_initialized = 1;
-       spin_lock_irq(&uhci->lock);
        start_rh(uhci);
        spin_unlock_irq(&uhci->lock);
        return 0;
index a686cf4..6891442 100644 (file)
@@ -761,12 +761,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        break;
                case USB_PORT_FEAT_LINK_STATE:
                        temp = xhci_readl(xhci, port_array[wIndex]);
+
+                       /* Disable port */
+                       if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
+                               xhci_dbg(xhci, "Disable port %d\n", wIndex);
+                               temp = xhci_port_state_to_neutral(temp);
+                               /*
+                                * Clear all change bits, so that we get a new
+                                * connection event.
+                                */
+                               temp |= PORT_CSC | PORT_PEC | PORT_WRC |
+                                       PORT_OCC | PORT_RC | PORT_PLC |
+                                       PORT_CEC;
+                               xhci_writel(xhci, temp | PORT_PE,
+                                       port_array[wIndex]);
+                               temp = xhci_readl(xhci, port_array[wIndex]);
+                               break;
+                       }
+
+                       /* Put link in RxDetect (enable port) */
+                       if (link_state == USB_SS_PORT_LS_RX_DETECT) {
+                               xhci_dbg(xhci, "Enable port %d\n", wIndex);
+                               xhci_set_link_state(xhci, port_array, wIndex,
+                                               link_state);
+                               temp = xhci_readl(xhci, port_array[wIndex]);
+                               break;
+                       }
+
                        /* Software should not attempt to set
-                        * port link state above '5' (Rx.Detect) and the port
+                        * port link state above '3' (U3) and the port
                         * must be enabled.
                         */
                        if ((temp & PORT_PE) == 0 ||
-                               (link_state > USB_SS_PORT_LS_RX_DETECT)) {
+                               (link_state > USB_SS_PORT_LS_U3)) {
                                xhci_warn(xhci, "Cannot set link state.\n");
                                goto error;
                        }
@@ -957,6 +984,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
        int max_ports;
        __le32 __iomem **port_array;
        struct xhci_bus_state *bus_state;
+       bool reset_change = false;
 
        max_ports = xhci_get_ports(hcd, &port_array);
        bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -988,6 +1016,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
                        buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
                        status = 1;
                }
+               if ((temp & PORT_RC))
+                       reset_change = true;
+       }
+       if (!status && !reset_change) {
+               xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+               clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        return status ? retval : 0;
index fb51c70..35616ff 100644 (file)
@@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
+       if (ep->desc.bInterval == 0)
+               return 0;
        return xhci_microframes_to_exponent(udev, ep,
                        ep->desc.bInterval, 0, 15);
 }
index cbb44b7..59fb5c6 100644 (file)
@@ -1725,6 +1725,15 @@ cleanup:
        if (bogus_port_status)
                return;
 
+       /*
+        * xHCI port-status-change events occur when the "or" of all the
+        * status-change bits in the portsc register changes from 0 to 1.
+        * New status changes won't cause an event if any other change
+        * bits are still set.  When an event occurs, switch over to
+        * polling to avoid losing status changes.
+        */
+       xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(hcd);
index 5c72c43..f1f01a8 100644 (file)
@@ -884,6 +884,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;
 
+       /* Don't poll the roothubs on bus suspend. */
+       xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+
        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
@@ -1069,6 +1074,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                compliance_mode_recovery_timer_init(xhci);
 
+       /* Re-enable port polling. */
+       xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
+
        return retval;
 }
 #endif /* CONFIG_PM */
index 7667b12..268148d 100644 (file)
@@ -2179,7 +2179,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
                if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
                        break;
                retval = 0;
-               dev_info(&intf->dev, "TEST 17:  unlink from %d queues of "
+               dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
                                "%d %d-byte writes\n",
                                param->iterations, param->sglen, param->length);
                for (i = param->iterations; retval == 0 && i > 0; --i) {
index 0968dd7..f522000 100644 (file)
@@ -105,7 +105,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
        musb_writel(&tx->tx_complete, 0, ptr);
 }
 
-static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
 {
        int     j;
 
@@ -150,7 +150,7 @@ static void cppi_pool_free(struct cppi_channel *c)
        c->last_processed = NULL;
 }
 
-static int __init cppi_controller_start(struct dma_controller *c)
+static int cppi_controller_start(struct dma_controller *c)
 {
        struct cppi     *controller;
        void __iomem    *tibase;
index f1c6c54..fd34867 100644 (file)
@@ -2298,10 +2298,7 @@ static int __init musb_init(void)
        if (usb_disabled())
                return 0;
 
-       pr_info("%s: version " MUSB_VERSION ", "
-               "?dma?"
-               ", "
-               "otg (peripheral+host)",
+       pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
                musb_driver_name);
        return platform_driver_register(&musb_driver);
 }
index e6f2ae8..f7d764d 100644 (file)
@@ -134,6 +134,11 @@ static const resource_size_t dsps_control_module_phys[] = {
        DSPS_AM33XX_CONTROL_MODULE_PHYS_1,
 };
 
+#define USBPHY_CM_PWRDN                (1 << 0)
+#define USBPHY_OTG_PWRDN       (1 << 1)
+#define USBPHY_OTGVDET_EN      (1 << 19)
+#define USBPHY_OTGSESSEND_EN   (1 << 20)
+
 /**
  * musb_dsps_phy_control - phy on/off
  * @glue: struct dsps_glue *
index 6223062..37962c9 100644 (file)
@@ -110,7 +110,7 @@ config AB8500_USB
 
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
-       depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
+       depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
        select USB_OTG
        select USB_OTG_UTILS
        help
index 1dd5750..eace975 100644 (file)
@@ -240,7 +240,7 @@ static void otg_clock_enable(struct mv_otg *mvotg)
        unsigned int i;
 
        for (i = 0; i < mvotg->clknum; i++)
-               clk_enable(mvotg->clk[i]);
+               clk_prepare_enable(mvotg->clk[i]);
 }
 
 static void otg_clock_disable(struct mv_otg *mvotg)
@@ -248,7 +248,7 @@ static void otg_clock_disable(struct mv_otg *mvotg)
        unsigned int i;
 
        for (i = 0; i < mvotg->clknum; i++)
-               clk_disable(mvotg->clk[i]);
+               clk_disable_unprepare(mvotg->clk[i]);
 }
 
 static int mv_otg_enable_internal(struct mv_otg *mvotg)
index dd41f61..f2985cd 100644 (file)
@@ -545,15 +545,6 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
        return 0;
 }
 
-static void usbhsg_uep_init(struct usbhsg_gpriv *gpriv)
-{
-       int i;
-       struct usbhsg_uep *uep;
-
-       usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
-               uep->pipe = NULL;
-}
-
 /*
  *
  *             usb_ep_ops
@@ -610,7 +601,12 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
 {
        struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 
-       return usbhsg_pipe_disable(uep);
+       usbhsg_pipe_disable(uep);
+
+       uep->pipe->mod_private  = NULL;
+       uep->pipe               = NULL;
+
+       return 0;
 }
 
 static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
@@ -761,9 +757,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
        usbhs_pipe_init(priv,
                        usbhsg_dma_map_ctrl);
        usbhs_fifo_init(priv);
-       usbhsg_uep_init(gpriv);
 
-       /* dcp init */
+       /* dcp init instead of usbhsg_ep_enable() */
        dcp->pipe               = usbhs_dcp_malloc(priv);
        dcp->pipe->mod_private  = dcp;
        usbhs_pipe_config_update(dcp->pipe, 0, 0, 64);
@@ -825,7 +820,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
        usbhs_sys_set_test_mode(priv, 0);
        usbhs_sys_function_ctrl(priv, 0);
 
-       usbhsg_pipe_disable(dcp);
+       usbhsg_ep_disable(&dcp->ep);
 
        dev_dbg(dev, "stop gadget\n");
 
@@ -998,6 +993,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
         */
        usbhsg_for_each_uep_with_dcp(uep, gpriv, i) {
                uep->gpriv      = gpriv;
+               uep->pipe       = NULL;
                snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i);
 
                uep->ep.name            = uep->ep_name;
index 3d3cd6c..b868154 100644 (file)
@@ -661,9 +661,10 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
                status = -ESHUTDOWN;
 
        urb->actual_length = pkt->actual;
-       usbhsh_ureq_free(hpriv, ureq);
 
        usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+       usbhsh_ureq_free(hpriv, ureq);
+
        usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
 
        usb_hcd_unlink_urb_from_ep(hcd, urb);
index 0a373b3..ba68835 100644 (file)
@@ -875,6 +875,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
+       /* Crucible Devices */
+       { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
index 049b6e7..fa5d560 100644 (file)
  * ATI command output: Cinterion MC55i
  */
 #define FTDI_CINTERION_MC55I_PID       0xA951
+
+/*
+ * Product: Comet Caller ID decoder
+ * Manufacturer: Crucible Technologies
+ */
+#define FTDI_CT_COMET_PID      0x8e08
index 58184f3..82afc4d 100644 (file)
@@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
        wait_queue_t wait;
        unsigned long flags;
 
+       if (!tty)
+               return;
+
        if (!timeout)
                timeout = (HZ * EDGE_CLOSING_WAIT)/100;
 
index e6f87b7..0d9dac9 100644 (file)
@@ -288,6 +288,7 @@ static void option_instat_callback(struct urb *urb);
 #define ALCATEL_VENDOR_ID                      0x1bbb
 #define ALCATEL_PRODUCT_X060S_X200             0x0000
 #define ALCATEL_PRODUCT_X220_X500D             0x0017
+#define ALCATEL_PRODUCT_L100V                  0x011e
 
 #define PIRELLI_VENDOR_ID                      0x1266
 #define PIRELLI_PRODUCT_C100_1                 0x1002
@@ -429,9 +430,12 @@ static void option_instat_callback(struct urb *urb);
 #define MEDIATEK_VENDOR_ID                     0x0e8d
 #define MEDIATEK_PRODUCT_DC_1COM               0x00a0
 #define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_4COM2              0x00a7
 #define MEDIATEK_PRODUCT_DC_5COM               0x00a4
 #define MEDIATEK_PRODUCT_7208_1COM             0x7101
 #define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_7103_2COM             0x7103
+#define MEDIATEK_PRODUCT_7106_2COM             0x7106
 #define MEDIATEK_PRODUCT_FP_1COM               0x0003
 #define MEDIATEK_PRODUCT_FP_2COM               0x0023
 #define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
@@ -441,6 +445,14 @@ static void option_instat_callback(struct urb *urb);
 #define CELLIENT_VENDOR_ID                     0x2692
 #define CELLIENT_PRODUCT_MEN200                        0x9005
 
+/* Hyundai Petatel Inc. products */
+#define PETATEL_VENDOR_ID                      0x1ff4
+#define PETATEL_PRODUCT_NP10T                  0x600e
+
+/* TP-LINK Incorporated products */
+#define TPLINK_VENDOR_ID                       0x2357
+#define TPLINK_PRODUCT_MA180                   0x0201
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -922,8 +934,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
          .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1190,6 +1204,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
        },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1294,7 +1310,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
        { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+       { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+       { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 4362d9e..f72323e 100644 (file)
@@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
                        filled = 1;
                } else {
                        /* Drop writes, fill reads with FF */
+                       filled = min((size_t)(x_end - pos), count);
                        if (!iswrite) {
                                char val = 0xFF;
                                size_t i;
 
-                               for (i = 0; i < x_end - pos; i++) {
+                               for (i = 0; i < filled; i++) {
                                        if (put_user(val, buf + i))
                                                goto out;
                                }
                        }
 
-                       filled = x_end - pos;
                }
 
                count -= filled;
index 1252678..0abf2bf 100644 (file)
@@ -139,6 +139,7 @@ struct imxfb_info {
        struct clk              *clk_ahb;
        struct clk              *clk_per;
        enum imxfb_type         devtype;
+       bool                    enabled;
 
        /*
         * These are the addresses we mapped
@@ -536,6 +537,10 @@ static void imxfb_exit_backlight(struct imxfb_info *fbi)
 
 static void imxfb_enable_controller(struct imxfb_info *fbi)
 {
+
+       if (fbi->enabled)
+               return;
+
        pr_debug("Enabling LCD controller\n");
 
        writel(fbi->screen_dma, fbi->regs + LCDC_SSA);
@@ -556,6 +561,7 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
        clk_prepare_enable(fbi->clk_ipg);
        clk_prepare_enable(fbi->clk_ahb);
        clk_prepare_enable(fbi->clk_per);
+       fbi->enabled = true;
 
        if (fbi->backlight_power)
                fbi->backlight_power(1);
@@ -565,6 +571,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
 
 static void imxfb_disable_controller(struct imxfb_info *fbi)
 {
+       if (!fbi->enabled)
+               return;
+
        pr_debug("Disabling LCD controller\n");
 
        if (fbi->backlight_power)
@@ -575,6 +584,7 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
        clk_disable_unprepare(fbi->clk_per);
        clk_disable_unprepare(fbi->clk_ipg);
        clk_disable_unprepare(fbi->clk_ahb);
+       fbi->enabled = false;
 
        writel(0, fbi->regs + LCDC_RMCR);
 }
@@ -729,6 +739,8 @@ static int __init imxfb_init_fbinfo(struct platform_device *pdev)
 
        memset(fbi, 0, sizeof(struct imxfb_info));
 
+       fbi->devtype = pdev->id_entry->driver_data;
+
        strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
 
        info->fix.type                  = FB_TYPE_PACKED_PIXELS;
@@ -789,7 +801,6 @@ static int __init imxfb_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        fbi = info->par;
-       fbi->devtype = pdev->id_entry->driver_data;
 
        if (!fb_mode)
                fb_mode = pdata->mode[0].mode.name;
index 4d99dd7..395cb6a 100644 (file)
@@ -145,8 +145,8 @@ static void ssd1307fb_update_display(struct ssd1307fb_par *par)
                                u32 page_length = SSD1307FB_WIDTH * i;
                                u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
                                u8 byte = *(vmem + index);
-                               u8 bit = byte & (1 << (7 - (j % 8)));
-                               bit = bit >> (7 - (j % 8));
+                               u8 bit = byte & (1 << (j % 8));
+                               bit = bit >> (j % 8);
                                buf |= bit << k;
                        }
                        ssd1307fb_write_data(par->client, buf);
index 7c294f4..96cab6a 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
@@ -459,43 +460,34 @@ static int ds1wm_probe(struct platform_device *pdev)
        if (!pdev)
                return -ENODEV;
 
-       ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
+       ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
        if (!ds1wm_data)
                return -ENOMEM;
 
        platform_set_drvdata(pdev, ds1wm_data);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENXIO;
-               goto err0;
-       }
-       ds1wm_data->map = ioremap(res->start, resource_size(res));
-       if (!ds1wm_data->map) {
-               ret = -ENOMEM;
-               goto err0;
-       }
+       if (!res)
+               return -ENXIO;
+       ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
+                                      resource_size(res));
+       if (!ds1wm_data->map)
+               return -ENOMEM;
 
        /* calculate bus shift from mem resource */
        ds1wm_data->bus_shift = resource_size(res) >> 3;
 
        ds1wm_data->pdev = pdev;
        ds1wm_data->cell = mfd_get_cell(pdev);
-       if (!ds1wm_data->cell) {
-               ret = -ENODEV;
-               goto err1;
-       }
+       if (!ds1wm_data->cell)
+               return -ENODEV;
        plat = pdev->dev.platform_data;
-       if (!plat) {
-               ret = -ENODEV;
-               goto err1;
-       }
+       if (!plat)
+               return -ENODEV;
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               ret = -ENXIO;
-               goto err1;
-       }
+       if (!res)
+               return -ENXIO;
        ds1wm_data->irq = res->start;
        ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
        ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
@@ -505,10 +497,10 @@ static int ds1wm_probe(struct platform_device *pdev)
        if (res->flags & IORESOURCE_IRQ_LOWEDGE)
                irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
 
-       ret = request_irq(ds1wm_data->irq, ds1wm_isr,
+       ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
                        IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
        if (ret)
-               goto err1;
+               return ret;
 
        ds1wm_up(ds1wm_data);
 
@@ -516,17 +508,12 @@ static int ds1wm_probe(struct platform_device *pdev)
 
        ret = w1_add_master_device(&ds1wm_master);
        if (ret)
-               goto err2;
+               goto err;
 
        return 0;
 
-err2:
+err:
        ds1wm_down(ds1wm_data);
-       free_irq(ds1wm_data->irq, ds1wm_data);
-err1:
-       iounmap(ds1wm_data->map);
-err0:
-       kfree(ds1wm_data);
 
        return ret;
 }
@@ -560,9 +547,6 @@ static int ds1wm_remove(struct platform_device *pdev)
 
        w1_remove_master_device(&ds1wm_master);
        ds1wm_down(ds1wm_data);
-       free_irq(ds1wm_data->irq, ds1wm_data);
-       iounmap(ds1wm_data->map);
-       kfree(ds1wm_data);
 
        return 0;
 }
index 708a25f..372c8c0 100644 (file)
@@ -109,34 +109,21 @@ static int mxc_w1_probe(struct platform_device *pdev)
        struct resource *res;
        int err = 0;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-
-       mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
+       mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
+                           GFP_KERNEL);
        if (!mdev)
                return -ENOMEM;
 
-       mdev->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(mdev->clk)) {
-               err = PTR_ERR(mdev->clk);
-               goto failed_clk;
-       }
+       mdev->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(mdev->clk))
+               return PTR_ERR(mdev->clk);
 
        mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
 
-       res = request_mem_region(res->start, resource_size(res),
-                               "mxc_w1");
-       if (!res) {
-               err = -EBUSY;
-               goto failed_req;
-       }
-
-       mdev->regs = ioremap(res->start, resource_size(res));
-       if (!mdev->regs) {
-               dev_err(&pdev->dev, "Cannot map mxc_w1 registers\n");
-               goto failed_ioremap;
-       }
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mdev->regs = devm_request_and_ioremap(&pdev->dev, res);
+       if (!mdev->regs)
+               return -EBUSY;
 
        clk_prepare_enable(mdev->clk);
        __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
@@ -148,20 +135,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
        err = w1_add_master_device(&mdev->bus_master);
 
        if (err)
-               goto failed_add;
+               return err;
 
        platform_set_drvdata(pdev, mdev);
        return 0;
-
-failed_add:
-       iounmap(mdev->regs);
-failed_ioremap:
-       release_mem_region(res->start, resource_size(res));
-failed_req:
-       clk_put(mdev->clk);
-failed_clk:
-       kfree(mdev);
-       return err;
 }
 
 /*
@@ -170,16 +147,10 @@ failed_clk:
 static int mxc_w1_remove(struct platform_device *pdev)
 {
        struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
        w1_remove_master_device(&mdev->bus_master);
 
-       iounmap(mdev->regs);
-       release_mem_region(res->start, resource_size(res));
        clk_disable_unprepare(mdev->clk);
-       clk_put(mdev->clk);
 
        platform_set_drvdata(pdev, NULL);
 
index 85b363a..d39dfa4 100644 (file)
@@ -72,7 +72,7 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int __init w1_gpio_probe(struct platform_device *pdev)
+static int w1_gpio_probe(struct platform_device *pdev)
 {
        struct w1_bus_master *master;
        struct w1_gpio_platform_data *pdata;
index 92d08e7..5ef583d 100644 (file)
@@ -45,10 +45,6 @@ MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature famil
 static int w1_strong_pullup = 1;
 module_param_named(strong_pullup, w1_strong_pullup, int, 0);
 
-static u8 bad_roms[][9] = {
-                               {0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
-                               {}
-                       };
 
 static ssize_t w1_therm_read(struct device *device,
        struct device_attribute *attr, char *buf);
@@ -168,16 +164,6 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
        return 0;
 }
 
-static int w1_therm_check_rom(u8 rom[9])
-{
-       int i;
-
-       for (i=0; i<sizeof(bad_roms)/9; ++i)
-               if (!memcmp(bad_roms[i], rom, 9))
-                       return 1;
-
-       return 0;
-}
 
 static ssize_t w1_therm_read(struct device *device,
        struct device_attribute *attr, char *buf)
@@ -194,10 +180,11 @@ static ssize_t w1_therm_read(struct device *device,
 
        memset(rom, 0, sizeof(rom));
 
-       verdict = 0;
-       crc = 0;
-
        while (max_trying--) {
+
+               verdict = 0;
+               crc = 0;
+
                if (!w1_reset_select_slave(sl)) {
                        int count = 0;
                        unsigned int tm = 750;
@@ -249,7 +236,7 @@ static ssize_t w1_therm_read(struct device *device,
                        }
                }
 
-               if (!w1_therm_check_rom(rom))
+               if (verdict)
                        break;
        }
 
@@ -260,7 +247,7 @@ static ssize_t w1_therm_read(struct device *device,
        if (verdict)
                memcpy(sl->rom, rom, sizeof(sl->rom));
        else
-               dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n");
+               dev_warn(device, "Read failed CRC check\n");
 
        for (i = 0; i < 9; ++i)
                c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
index 4dcfced..084041d 100644 (file)
@@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu)
 static int vcpu_online(unsigned int cpu)
 {
        int err;
-       char dir[32], state[32];
+       char dir[16], state[16];
 
        sprintf(dir, "cpu/%u", cpu);
-       err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+       err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
        if (err != 1) {
                if (!xen_initial_domain())
                        printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
index 2e22df2..3c8803f 100644 (file)
@@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
+#define populate_freeable_maps use_ptemod
 
 struct gntdev_priv {
+       /* maps with visible offsets in the file descriptor */
        struct list_head maps;
-       /* lock protects maps from concurrent changes */
+       /* maps that are not visible; will be freed on munmap.
+        * Only populated if populate_freeable_maps == 1 */
+       struct list_head freeable_maps;
+       /* lock protects maps and freeable_maps */
        spinlock_t lock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
@@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
        return NULL;
 }
 
-static void gntdev_put_map(struct grant_map *map)
+static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 {
        if (!map)
                return;
@@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map)
                evtchn_put(map->notify.event);
        }
 
+       if (populate_freeable_maps && priv) {
+               spin_lock(&priv->lock);
+               list_del(&map->next);
+               spin_unlock(&priv->lock);
+       }
+
        if (map->pages && !use_ptemod)
                unmap_grant_pages(map, 0, map->count);
        gntdev_free_map(map);
@@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 
        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);
-               if (pgno >= offset && pgno < offset + pages && use_ptemod) {
-                       void __user *tmp = (void __user *)
-                               map->vma->vm_start + map->notify.addr;
-                       err = copy_to_user(tmp, &err, 1);
-                       if (err)
-                               return -EFAULT;
-                       map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
-               } else if (pgno >= offset && pgno < offset + pages) {
-                       uint8_t *tmp = kmap(map->pages[pgno]);
+               if (pgno >= offset && pgno < offset + pages) {
+                       /* No need for kmap, pages are in lowmem */
+                       uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
                        tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
-                       kunmap(map->pages[pgno]);
                        map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
                }
        }
@@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
 static void gntdev_vma_close(struct vm_area_struct *vma)
 {
        struct grant_map *map = vma->vm_private_data;
+       struct file *file = vma->vm_file;
+       struct gntdev_priv *priv = file->private_data;
 
        pr_debug("gntdev_vma_close %p\n", vma);
-       map->vma = NULL;
+       if (use_ptemod) {
+               /* It is possible that an mmu notifier could be running
+                * concurrently, so take priv->lock to ensure that the vma won't
+                * vanishing during the unmap_grant_pages call, since we will
+                * spin here until that completes. Such a concurrent call will
+                * not do any unmapping, since that has been done prior to
+                * closing the vma, but it may still iterate the unmap_ops list.
+                */
+               spin_lock(&priv->lock);
+               map->vma = NULL;
+               spin_unlock(&priv->lock);
+       }
        vma->vm_private_data = NULL;
-       gntdev_put_map(map);
+       gntdev_put_map(priv, map);
 }
 
 static struct vm_operations_struct gntdev_vmops = {
@@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = {
 
 /* ------------------------------------------------------------------ */
 
+static void unmap_if_in_range(struct grant_map *map,
+                             unsigned long start, unsigned long end)
+{
+       unsigned long mstart, mend;
+       int err;
+
+       if (!map->vma)
+               return;
+       if (map->vma->vm_start >= end)
+               return;
+       if (map->vma->vm_end <= start)
+               return;
+       mstart = max(start, map->vma->vm_start);
+       mend   = min(end,   map->vma->vm_end);
+       pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+                       map->index, map->count,
+                       map->vma->vm_start, map->vma->vm_end,
+                       start, end, mstart, mend);
+       err = unmap_grant_pages(map,
+                               (mstart - map->vma->vm_start) >> PAGE_SHIFT,
+                               (mend - mstart) >> PAGE_SHIFT);
+       WARN_ON(err);
+}
+
 static void mn_invl_range_start(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
 {
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;
-       unsigned long mstart, mend;
-       int err;
 
        spin_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
-               if (!map->vma)
-                       continue;
-               if (map->vma->vm_start >= end)
-                       continue;
-               if (map->vma->vm_end <= start)
-                       continue;
-               mstart = max(start, map->vma->vm_start);
-               mend   = min(end,   map->vma->vm_end);
-               pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
-                               map->index, map->count,
-                               map->vma->vm_start, map->vma->vm_end,
-                               start, end, mstart, mend);
-               err = unmap_grant_pages(map,
-                                       (mstart - map->vma->vm_start) >> PAGE_SHIFT,
-                                       (mend - mstart) >> PAGE_SHIFT);
-               WARN_ON(err);
+               unmap_if_in_range(map, start, end);
+       }
+       list_for_each_entry(map, &priv->freeable_maps, next) {
+               unmap_if_in_range(map, start, end);
        }
        spin_unlock(&priv->lock);
 }
@@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn,
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
+       list_for_each_entry(map, &priv->freeable_maps, next) {
+               if (!map->vma)
+                       continue;
+               pr_debug("map %d+%d (%lx %lx)\n",
+                               map->index, map->count,
+                               map->vma->vm_start, map->vma->vm_end);
+               err = unmap_grant_pages(map, /* offset */ 0, map->count);
+               WARN_ON(err);
+       }
        spin_unlock(&priv->lock);
 }
 
@@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&priv->maps);
+       INIT_LIST_HEAD(&priv->freeable_maps);
        spin_lock_init(&priv->lock);
 
        if (use_ptemod) {
@@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
-               gntdev_put_map(map);
+               gntdev_put_map(NULL /* already removed */, map);
        }
+       WARN_ON(!list_empty(&priv->freeable_maps));
 
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 
        if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
                pr_debug("can't map: over limit\n");
-               gntdev_put_map(map);
+               gntdev_put_map(NULL, map);
                return err;
        }
 
        if (copy_from_user(map->grants, &u->refs,
                           sizeof(map->grants[0]) * op.count) != 0) {
-               gntdev_put_map(map);
-               return err;
+               gntdev_put_map(NULL, map);
+               return -EFAULT;
        }
 
        spin_lock(&priv->lock);
@@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
+               if (populate_freeable_maps)
+                       list_add_tail(&map->next, &priv->freeable_maps);
                err = 0;
        }
        spin_unlock(&priv->lock);
        if (map)
-               gntdev_put_map(map);
+               gntdev_put_map(priv, map);
        return err;
 }
 
@@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
        struct ioctl_gntdev_get_offset_for_vaddr op;
        struct vm_area_struct *vma;
        struct grant_map *map;
+       int rv = -EINVAL;
 
        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 
+       down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
-               return -EINVAL;
+               goto out_unlock;
 
        map = vma->vm_private_data;
        if (!map)
-               return -EINVAL;
+               goto out_unlock;
 
        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
+       rv = 0;
 
-       if (copy_to_user(u, &op, sizeof(op)) != 0)
+ out_unlock:
+       up_read(&current->mm->mmap_sem);
+
+       if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
-       return 0;
+       return rv;
 }
 
 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
@@ -778,7 +824,7 @@ out_unlock_put:
 out_put_map:
        if (use_ptemod)
                map->vma = NULL;
-       gntdev_put_map(map);
+       gntdev_put_map(priv, map);
        return err;
 }
 
index 7038de5..157c0cc 100644 (file)
 /* External tools reserve first few grant table entries. */
 #define NR_RESERVED_ENTRIES 8
 #define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME \
-(grant_table_version == 1 ?                      \
-(PAGE_SIZE / sizeof(struct grant_entry_v1)) :   \
-(PAGE_SIZE / sizeof(union grant_entry_v2)))
 
 static grant_ref_t **gnttab_list;
 static unsigned int nr_grant_frames;
@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;
 static grant_status_t *grstatus;
 
 static int grant_table_version;
+static int grefs_per_grant_frame;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
 
@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)
        unsigned int new_nr_grant_frames, extra_entries, i;
        unsigned int nr_glist_frames, new_nr_glist_frames;
 
+       BUG_ON(grefs_per_grant_frame == 0);
+
        new_nr_grant_frames = nr_grant_frames + more_frames;
-       extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;
+       extra_entries       = more_frames * grefs_per_grant_frame;
 
-       nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+       nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        new_nr_glist_frames =
-               (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+               (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
                if (!gnttab_list[i])
@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)
        }
 
 
-       for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-            i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+       for (i = grefs_per_grant_frame * nr_grant_frames;
+            i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
                gnttab_entry(i) = i + 1;
 
        gnttab_entry(i) = gnttab_free_head;
-       gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+       gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
        gnttab_free_count += extra_entries;
 
        nr_grant_frames = new_nr_grant_frames;
@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
-       return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+       BUG_ON(grefs_per_grant_frame == 0);
+       return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
 }
 
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)
        rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
        if (rc == 0 && gsv.version == 2) {
                grant_table_version = 2;
+               grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
                gnttab_interface = &gnttab_v2_ops;
        } else if (grant_table_version == 2) {
                /*
@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)
                panic("we need grant tables version 2, but only version 1 is available");
        } else {
                grant_table_version = 1;
+               grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
                gnttab_interface = &gnttab_v1_ops;
        }
        printk(KERN_INFO "Grant tables using version %d layout.\n",
                grant_table_version);
 }
 
-int gnttab_resume(void)
+static int gnttab_setup(void)
 {
        unsigned int max_nr_gframes;
 
-       gnttab_request_version();
        max_nr_gframes = gnttab_max_grant_frames();
        if (max_nr_gframes < nr_grant_frames)
                return -ENOSYS;
@@ -1160,6 +1161,12 @@ int gnttab_resume(void)
        return 0;
 }
 
+int gnttab_resume(void)
+{
+       gnttab_request_version();
+       return gnttab_setup();
+}
+
 int gnttab_suspend(void)
 {
        gnttab_interface->unmap_frames();
@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)
        int rc;
        unsigned int cur, extra;
 
+       BUG_ON(grefs_per_grant_frame == 0);
        cur = nr_grant_frames;
-       extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
-                GREFS_PER_GRANT_FRAME);
+       extra = ((req_entries + (grefs_per_grant_frame-1)) /
+                grefs_per_grant_frame);
        if (cur + extra > gnttab_max_grant_frames())
                return -ENOSPC;
 
@@ -1191,21 +1199,23 @@ int gnttab_init(void)
        unsigned int nr_init_grefs;
        int ret;
 
+       gnttab_request_version();
        nr_grant_frames = 1;
        boot_max_nr_grant_frames = __max_nr_grant_frames();
 
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
+       BUG_ON(grefs_per_grant_frame == 0);
        max_nr_glist_frames = (boot_max_nr_grant_frames *
-                              GREFS_PER_GRANT_FRAME / RPP);
+                              grefs_per_grant_frame / RPP);
 
        gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
                              GFP_KERNEL);
        if (gnttab_list == NULL)
                return -ENOMEM;
 
-       nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+       nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        for (i = 0; i < nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
                if (gnttab_list[i] == NULL) {
@@ -1214,12 +1224,12 @@ int gnttab_init(void)
                }
        }
 
-       if (gnttab_resume() < 0) {
+       if (gnttab_setup() < 0) {
                ret = -ENODEV;
                goto ini_nomem;
        }
 
-       nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+       nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
 
        for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
                gnttab_entry(i) = i + 1;
index 0bbbccb..ca2b00e 100644 (file)
@@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata)
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;
 
-       if (!xen_initial_domain())
-               return -EPERM;
-
        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;
@@ -261,11 +258,12 @@ struct mmap_batch_state {
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
-       /* An array for individual errors */
-       int *err;
+       int version;
 
        /* User-space mfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_mfn;
+       /* User-space int array to store errors in the second pass for V2. */
+       int __user *user_err;
 };
 
 /* auto translated dom0 note: if domU being created is PV, then mfn is
@@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)
                                         &cur_page);
 
        /* Store error code for second pass. */
-       *(st->err++) = ret;
+       if (st->version == 1) {
+               if (ret < 0) {
+                       /*
+                        * V1 encodes the error codes in the 32bit top nibble of the
+                        * mfn (with its known limitations vis-a-vis 64 bit callers).
+                        */
+                       *mfnp |= (ret == -ENOENT) ?
+                                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
+                                               PRIVCMD_MMAPBATCH_MFN_ERROR;
+               }
+       } else { /* st->version == 2 */
+               *((int *) mfnp) = ret;
+       }
 
        /* And see if it affects the global_error. */
        if (ret < 0) {
@@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)
        return 0;
 }
 
-static int mmap_return_errors_v1(void *data, void *state)
+static int mmap_return_errors(void *data, void *state)
 {
-       xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
-       int err = *(st->err++);
 
-       /*
-        * V1 encodes the error codes in the 32bit top nibble of the
-        * mfn (with its known limitations vis-a-vis 64 bit callers).
-        */
-       *mfnp |= (err == -ENOENT) ?
-                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
-                               PRIVCMD_MMAPBATCH_MFN_ERROR;
-       return __put_user(*mfnp, st->user_mfn++);
+       if (st->version == 1) {
+               xen_pfn_t mfnp = *((xen_pfn_t *) data);
+               if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
+                       return __put_user(mfnp, st->user_mfn++);
+               else
+                       st->user_mfn++;
+       } else { /* st->version == 2 */
+               int err = *((int *) data);
+               if (err)
+                       return __put_user(err, st->user_err++);
+               else
+                       st->user_err++;
+       }
+
+       return 0;
 }
 
 /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
@@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
-       int *err_array = NULL;
        struct mmap_batch_state state;
 
-       if (!xen_initial_domain())
-               return -EPERM;
-
        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
@@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                goto out;
        }
 
-       err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
-       if (err_array == NULL) {
-               ret = -ENOMEM;
-               goto out;
+       if (version == 2) {
+               /* Zero error array now to only copy back actual errors. */
+               if (clear_user(m.err, sizeof(int) * m.num)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        }
 
        down_write(&mm->mmap_sem);
@@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        state.va            = m.addr;
        state.index         = 0;
        state.global_error  = 0;
-       state.err           = err_array;
+       state.version       = version;
 
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
@@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
        up_write(&mm->mmap_sem);
 
-       if (version == 1) {
-               if (state.global_error) {
-                       /* Write back errors in second pass. */
-                       state.user_mfn = (xen_pfn_t *)m.arr;
-                       state.err      = err_array;
-                       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                                            &pagelist, mmap_return_errors_v1, &state);
-               } else
-                       ret = 0;
-
-       } else if (version == 2) {
-               ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
-               if (ret)
-                       ret = -EFAULT;
-       }
+       if (state.global_error) {
+               /* Write back errors in second pass. */
+               state.user_mfn = (xen_pfn_t *)m.arr;
+               state.user_err = m.err;
+               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                                                        &pagelist, mmap_return_errors, &state);
+       } else
+               ret = 0;
 
        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
@@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                ret = -ENOENT;
 
 out:
-       kfree(err_array);
        free_page_list(&pagelist);
 
        return ret;
index a7def01..f72af87 100644 (file)
@@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
                                             struct pci_dev *dev)
 {
-       if (xen_pcibk_backend && xen_pcibk_backend->free)
+       if (xen_pcibk_backend && xen_pcibk_backend->release)
                return xen_pcibk_backend->release(pdev, dev);
 }
 
index cfe512f..780725a 100644 (file)
@@ -68,16 +68,6 @@ source "fs/quota/Kconfig"
 source "fs/autofs4/Kconfig"
 source "fs/fuse/Kconfig"
 
-config CUSE
-       tristate "Character device in Userspace support"
-       depends on FUSE_FS
-       help
-         This FUSE extension allows character devices to be
-         implemented in userspace.
-
-         If you want to develop or use userspace character device
-         based on CUSE, answer Y or M.
-
 config GENERIC_ACL
        bool
        select FS_POSIX_ACL
index 521e9d4..a8b8adc 100644 (file)
@@ -3997,7 +3997,7 @@ again:
         * We make the other tasks wait for the flush only when we can flush
         * all things.
         */
-       if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) {
+       if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
                flushing = true;
                space_info->flush = 1;
        }
@@ -5560,7 +5560,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        int empty_cluster = 2 * 1024 * 1024;
        struct btrfs_space_info *space_info;
        int loop = 0;
-       int index = 0;
+       int index = __get_raid_index(data);
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
                RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool found_uncached_bg = false;
@@ -6788,11 +6788,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                                                       &wc->flags[level]);
                        if (ret < 0) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
+                               path->locks[level] = 0;
                                return ret;
                        }
                        BUG_ON(wc->refs[level] == 0);
                        if (wc->refs[level] == 1) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
+                               path->locks[level] = 0;
                                return 1;
                        }
                }
index f169d6b..2e8cae6 100644 (file)
@@ -171,6 +171,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;
 
+       if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
+           test_bit(EXTENT_FLAG_LOGGING, &next->flags))
+               return 0;
+
        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
@@ -255,7 +259,8 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
        if (!em)
                goto out;
 
-       list_move(&em->list, &tree->modified_extents);
+       if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+               list_move(&em->list, &tree->modified_extents);
        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
@@ -280,6 +285,12 @@ out:
 
 }
 
+void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
+{
+       clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
+       try_merge_map(tree, em);
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree:      tree to insert new map in
index 922943c..c6598c8 100644 (file)
@@ -69,6 +69,7 @@ void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 void extent_map_exit(void);
 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
+void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
 struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len);
 #endif
index bd38cef..94aa53b 100644 (file)
@@ -460,8 +460,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                if (!contig)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
-               if (!contig && (offset >= ordered->file_offset + ordered->len ||
-                   offset < ordered->file_offset)) {
+               if (offset >= ordered->file_offset + ordered->len ||
+                   offset < ordered->file_offset) {
                        unsigned long bytes_left;
                        sums->len = this_sum_bytes;
                        this_sum_bytes = 0;
index 77061bf..f76b1fd 100644 (file)
@@ -2241,6 +2241,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
        if (lockend <= lockstart)
                lockend = lockstart + root->sectorsize;
 
+       lockend--;
        len = lockend - lockstart + 1;
 
        len = max_t(u64, len, root->sectorsize);
@@ -2307,9 +2308,12 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
                                        }
                                }
 
-                               *offset = start;
-                               free_extent_map(em);
-                               break;
+                               if (!test_bit(EXTENT_FLAG_PREALLOC,
+                                             &em->flags)) {
+                                       *offset = start;
+                                       free_extent_map(em);
+                                       break;
+                               }
                        }
                }
 
index 59ea2e4..0be7a87 100644 (file)
@@ -1862,11 +1862,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       int ret = 0;
+       int ret;
+       bool re_search = false;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       ret = 0;
        if (!bytes)
                goto out_lock;
 
@@ -1879,17 +1881,17 @@ again:
                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
-                       /* the tree logging code might be calling us before we
-                        * have fully loaded the free space rbtree for this
-                        * block group.  So it is possible the entry won't
-                        * be in the rbtree yet at all.  The caching code
-                        * will make sure not to put it in the rbtree if
-                        * the logging code has pinned it.
+                       /*
+                        * If we found a partial bit of our free space in a
+                        * bitmap but then couldn't find the other part this may
+                        * be a problem, so WARN about it.
                         */
+                       WARN_ON(re_search);
                        goto out_lock;
                }
        }
 
+       re_search = false;
        if (!info->bitmap) {
                unlink_free_space(ctl, info);
                if (offset == info->offset) {
@@ -1935,8 +1937,10 @@ again:
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
-       if (ret == -EAGAIN)
+       if (ret == -EAGAIN) {
+               re_search = true;
                goto again;
+       }
        BUG_ON(ret); /* logic error */
 out_lock:
        spin_unlock(&ctl->tree_lock);
index 16d9e8e..cc93b23 100644 (file)
@@ -88,7 +88,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
 };
 
-static int btrfs_setsize(struct inode *inode, loff_t newsize);
+static int btrfs_setsize(struct inode *inode, struct iattr *attr);
 static int btrfs_truncate(struct inode *inode);
 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
@@ -2478,6 +2478,18 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
+
+                       /* 1 for the orphan item deletion. */
+                       trans = btrfs_start_transaction(root, 1);
+                       if (IS_ERR(trans)) {
+                               ret = PTR_ERR(trans);
+                               goto out;
+                       }
+                       ret = btrfs_orphan_add(trans, inode);
+                       btrfs_end_transaction(trans, root);
+                       if (ret)
+                               goto out;
+
                        ret = btrfs_truncate(inode);
                } else {
                        nr_unlink++;
@@ -3665,6 +3677,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                block_end - cur_offset, 0);
                if (IS_ERR(em)) {
                        err = PTR_ERR(em);
+                       em = NULL;
                        break;
                }
                last_byte = min(extent_map_end(em), block_end);
@@ -3748,16 +3761,27 @@ next:
        return err;
 }
 
-static int btrfs_setsize(struct inode *inode, loff_t newsize)
+static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        loff_t oldsize = i_size_read(inode);
+       loff_t newsize = attr->ia_size;
+       int mask = attr->ia_valid;
        int ret;
 
        if (newsize == oldsize)
                return 0;
 
+       /*
+        * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
+        * special case where we need to update the times despite not having
+        * these flags set.  For all other operations the VFS set these flags
+        * explicitly if it wants a timestamp update.
+        */
+       if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
+               inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
+
        if (newsize > oldsize) {
                truncate_pagecache(inode, oldsize, newsize);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
@@ -3783,9 +3807,34 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                        set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
                                &BTRFS_I(inode)->runtime_flags);
 
+               /*
+                * 1 for the orphan item we're going to add
+                * 1 for the orphan item deletion.
+                */
+               trans = btrfs_start_transaction(root, 2);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+
+               /*
+                * We need to do this in case we fail at _any_ point during the
+                * actual truncate.  Once we do the truncate_setsize we could
+                * invalidate pages which forces any outstanding ordered io to
+                * be instantly completed which will give us extents that need
+                * to be truncated.  If we fail to get an orphan inode down we
+                * could have left over extents that were never meant to live,
+                * so we need to garuntee from this point on that everything
+                * will be consistent.
+                */
+               ret = btrfs_orphan_add(trans, inode);
+               btrfs_end_transaction(trans, root);
+               if (ret)
+                       return ret;
+
                /* we don't support swapfiles, so vmtruncate shouldn't fail */
                truncate_setsize(inode, newsize);
                ret = btrfs_truncate(inode);
+               if (ret && inode->i_nlink)
+                       btrfs_orphan_del(NULL, inode);
        }
 
        return ret;
@@ -3805,7 +3854,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                return err;
 
        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-               err = btrfs_setsize(inode, attr->ia_size);
+               err = btrfs_setsize(inode, attr);
                if (err)
                        return err;
        }
@@ -5572,10 +5621,13 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
                return em;
        if (em) {
                /*
-                * if our em maps to a hole, there might
-                * actually be delalloc bytes behind it
+                * if our em maps to
+                * -  a hole or
+                * -  a pre-alloc extent,
+                * there might actually be delalloc bytes behind it.
                 */
-               if (em->block_start != EXTENT_MAP_HOLE)
+               if (em->block_start != EXTENT_MAP_HOLE &&
+                   !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
                        return em;
                else
                        hole_em = em;
@@ -5657,6 +5709,8 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
                         */
                        em->block_start = hole_em->block_start;
                        em->block_len = hole_len;
+                       if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
+                               set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                } else {
                        em->start = range_start;
                        em->len = found;
@@ -6915,11 +6969,9 @@ static int btrfs_truncate(struct inode *inode)
 
        /*
         * 1 for the truncate slack space
-        * 1 for the orphan item we're going to add
-        * 1 for the orphan item deletion
         * 1 for updating the inode.
         */
-       trans = btrfs_start_transaction(root, 4);
+       trans = btrfs_start_transaction(root, 2);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                goto out;
@@ -6930,12 +6982,6 @@ static int btrfs_truncate(struct inode *inode)
                                      min_size);
        BUG_ON(ret);
 
-       ret = btrfs_orphan_add(trans, inode);
-       if (ret) {
-               btrfs_end_transaction(trans, root);
-               goto out;
-       }
-
        /*
         * setattr is responsible for setting the ordered_data_close flag,
         * but that is only tested during the last file release.  That
@@ -7004,12 +7050,6 @@ static int btrfs_truncate(struct inode *inode)
                ret = btrfs_orphan_del(trans, inode);
                if (ret)
                        err = ret;
-       } else if (ret && inode->i_nlink > 0) {
-               /*
-                * Failed to do the truncate, remove us from the in memory
-                * orphan list.
-                */
-               ret = btrfs_orphan_del(NULL, inode);
        }
 
        if (trans) {
@@ -7531,41 +7571,61 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
  */
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 {
-       struct list_head *head = &root->fs_info->delalloc_inodes;
        struct btrfs_inode *binode;
        struct inode *inode;
        struct btrfs_delalloc_work *work, *next;
        struct list_head works;
+       struct list_head splice;
        int ret = 0;
 
        if (root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
        INIT_LIST_HEAD(&works);
-
+       INIT_LIST_HEAD(&splice);
+again:
        spin_lock(&root->fs_info->delalloc_lock);
-       while (!list_empty(head)) {
-               binode = list_entry(head->next, struct btrfs_inode,
+       list_splice_init(&root->fs_info->delalloc_inodes, &splice);
+       while (!list_empty(&splice)) {
+               binode = list_entry(splice.next, struct btrfs_inode,
                                    delalloc_inodes);
+
+               list_del_init(&binode->delalloc_inodes);
+
                inode = igrab(&binode->vfs_inode);
                if (!inode)
-                       list_del_init(&binode->delalloc_inodes);
+                       continue;
+
+               list_add_tail(&binode->delalloc_inodes,
+                             &root->fs_info->delalloc_inodes);
                spin_unlock(&root->fs_info->delalloc_lock);
-               if (inode) {
-                       work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
-                       if (!work) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       list_add_tail(&work->list, &works);
-                       btrfs_queue_worker(&root->fs_info->flush_workers,
-                                          &work->work);
+
+               work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
+               if (unlikely(!work)) {
+                       ret = -ENOMEM;
+                       goto out;
                }
+               list_add_tail(&work->list, &works);
+               btrfs_queue_worker(&root->fs_info->flush_workers,
+                                  &work->work);
+
                cond_resched();
                spin_lock(&root->fs_info->delalloc_lock);
        }
        spin_unlock(&root->fs_info->delalloc_lock);
 
+       list_for_each_entry_safe(work, next, &works, list) {
+               list_del_init(&work->list);
+               btrfs_wait_and_free_delalloc_work(work);
+       }
+
+       spin_lock(&root->fs_info->delalloc_lock);
+       if (!list_empty(&root->fs_info->delalloc_inodes)) {
+               spin_unlock(&root->fs_info->delalloc_lock);
+               goto again;
+       }
+       spin_unlock(&root->fs_info->delalloc_lock);
+
        /* the filemap_flush will queue IO into the worker threads, but
         * we have to make sure the IO is actually started and that
         * ordered extents get created before we return
@@ -7578,11 +7638,18 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
                    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
        }
        atomic_dec(&root->fs_info->async_submit_draining);
+       return 0;
 out:
        list_for_each_entry_safe(work, next, &works, list) {
                list_del_init(&work->list);
                btrfs_wait_and_free_delalloc_work(work);
        }
+
+       if (!list_empty_careful(&splice)) {
+               spin_lock(&root->fs_info->delalloc_lock);
+               list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
+               spin_unlock(&root->fs_info->delalloc_lock);
+       }
        return ret;
 }
 
index 4b45167..5b22d45 100644 (file)
@@ -1339,7 +1339,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-               return -EINPROGRESS;
+               mnt_drop_write_file(file);
+               return -EINVAL;
        }
 
        mutex_lock(&root->fs_info->volume_mutex);
@@ -1362,6 +1363,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                printk(KERN_INFO "btrfs: resizing devid %llu\n",
                       (unsigned long long)devid);
        }
+
        device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
        if (!device) {
                printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
@@ -1369,9 +1371,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                ret = -EINVAL;
                goto out_free;
        }
-       if (device->fs_devices && device->fs_devices->seeding) {
+
+       if (!device->writeable) {
                printk(KERN_INFO "btrfs: resizer unable to apply on "
-                      "seeding device %llu\n",
+                      "readonly device %llu\n",
                       (unsigned long long)devid);
                ret = -EINVAL;
                goto out_free;
@@ -1443,8 +1446,8 @@ out_free:
        kfree(vol_args);
 out:
        mutex_unlock(&root->fs_info->volume_mutex);
-       mnt_drop_write_file(file);
        atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+       mnt_drop_write_file(file);
        return ret;
 }
 
@@ -2095,13 +2098,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
                if (err)
                        goto out_dput;
-
-               /* check if subvolume may be deleted by a non-root user */
-               err = btrfs_may_delete(dir, dentry, 1);
-               if (err)
-                       goto out_dput;
        }
 
+       /* check if subvolume may be deleted by a user */
+       err = btrfs_may_delete(dir, dentry, 1);
+       if (err)
+               goto out_dput;
+
        if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
                err = -EINVAL;
                goto out_dput;
@@ -2183,19 +2186,20 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
        struct btrfs_ioctl_defrag_range_args *range;
        int ret;
 
-       if (btrfs_root_readonly(root))
-               return -EROFS;
+       ret = mnt_want_write_file(file);
+       if (ret)
+               return ret;
 
        if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-               return -EINPROGRESS;
+               mnt_drop_write_file(file);
+               return -EINVAL;
        }
-       ret = mnt_want_write_file(file);
-       if (ret) {
-               atomic_set(&root->fs_info->mutually_exclusive_operation_running,
-                          0);
-               return ret;
+
+       if (btrfs_root_readonly(root)) {
+               ret = -EROFS;
+               goto out;
        }
 
        switch (inode->i_mode & S_IFMT) {
@@ -2247,8 +2251,8 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                ret = -EINVAL;
        }
 out:
-       mnt_drop_write_file(file);
        atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+       mnt_drop_write_file(file);
        return ret;
 }
 
@@ -2263,7 +2267,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
        if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-               return -EINPROGRESS;
+               return -EINVAL;
        }
 
        mutex_lock(&root->fs_info->volume_mutex);
@@ -2300,7 +2304,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
                mnt_drop_write_file(file);
-               return -EINPROGRESS;
+               return -EINVAL;
        }
 
        mutex_lock(&root->fs_info->volume_mutex);
@@ -2316,8 +2320,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
        kfree(vol_args);
 out:
        mutex_unlock(&root->fs_info->volume_mutex);
-       mnt_drop_write_file(file);
        atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+       mnt_drop_write_file(file);
        return ret;
 }
 
@@ -3437,8 +3441,8 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ioctl_balance_args *bargs;
        struct btrfs_balance_control *bctl;
+       bool need_unlock; /* for mut. excl. ops lock */
        int ret;
-       int need_to_clear_lock = 0;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -3447,14 +3451,61 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        if (ret)
                return ret;
 
-       mutex_lock(&fs_info->volume_mutex);
+again:
+       if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+               mutex_lock(&fs_info->volume_mutex);
+               mutex_lock(&fs_info->balance_mutex);
+               need_unlock = true;
+               goto locked;
+       }
+
+       /*
+        * mut. excl. ops lock is locked.  Three possibilites:
+        *   (1) some other op is running
+        *   (2) balance is running
+        *   (3) balance is paused -- special case (think resume)
+        */
        mutex_lock(&fs_info->balance_mutex);
+       if (fs_info->balance_ctl) {
+               /* this is either (2) or (3) */
+               if (!atomic_read(&fs_info->balance_running)) {
+                       mutex_unlock(&fs_info->balance_mutex);
+                       if (!mutex_trylock(&fs_info->volume_mutex))
+                               goto again;
+                       mutex_lock(&fs_info->balance_mutex);
+
+                       if (fs_info->balance_ctl &&
+                           !atomic_read(&fs_info->balance_running)) {
+                               /* this is (3) */
+                               need_unlock = false;
+                               goto locked;
+                       }
+
+                       mutex_unlock(&fs_info->balance_mutex);
+                       mutex_unlock(&fs_info->volume_mutex);
+                       goto again;
+               } else {
+                       /* this is (2) */
+                       mutex_unlock(&fs_info->balance_mutex);
+                       ret = -EINPROGRESS;
+                       goto out;
+               }
+       } else {
+               /* this is (1) */
+               mutex_unlock(&fs_info->balance_mutex);
+               pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+locked:
+       BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
 
        if (arg) {
                bargs = memdup_user(arg, sizeof(*bargs));
                if (IS_ERR(bargs)) {
                        ret = PTR_ERR(bargs);
-                       goto out;
+                       goto out_unlock;
                }
 
                if (bargs->flags & BTRFS_BALANCE_RESUME) {
@@ -3474,13 +3525,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
                bargs = NULL;
        }
 
-       if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-                       1)) {
-               pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+       if (fs_info->balance_ctl) {
                ret = -EINPROGRESS;
                goto out_bargs;
        }
-       need_to_clear_lock = 1;
 
        bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
        if (!bctl) {
@@ -3501,11 +3549,17 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        }
 
 do_balance:
-       ret = btrfs_balance(bctl, bargs);
        /*
-        * bctl is freed in __cancel_balance or in free_fs_info if
-        * restriper was paused all the way until unmount
+        * Ownership of bctl and mutually_exclusive_operation_running
+        * goes to to btrfs_balance.  bctl is freed in __cancel_balance,
+        * or, if restriper was paused all the way until unmount, in
+        * free_fs_info.  mutually_exclusive_operation_running is
+        * cleared in __cancel_balance.
         */
+       need_unlock = false;
+
+       ret = btrfs_balance(bctl, bargs);
+
        if (arg) {
                if (copy_to_user(arg, bargs, sizeof(*bargs)))
                        ret = -EFAULT;
@@ -3513,12 +3567,12 @@ do_balance:
 
 out_bargs:
        kfree(bargs);
-out:
-       if (need_to_clear_lock)
-               atomic_set(&root->fs_info->mutually_exclusive_operation_running,
-                          0);
+out_unlock:
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+       if (need_unlock)
+               atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+out:
        mnt_drop_write_file(file);
        return ret;
 }
@@ -3698,6 +3752,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
                goto drop_write;
        }
 
+       if (!sa->qgroupid) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
index fe9d02c..a5c8562 100644 (file)
@@ -379,6 +379,13 @@ next1:
 
                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
+               if (ret == -ENOENT) {
+                       printk(KERN_WARNING
+                               "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
+                               (unsigned long long)found_key.objectid,
+                               (unsigned long long)found_key.offset);
+                       ret = 0;        /* ignore the error */
+               }
                if (ret)
                        goto out;
 next2:
@@ -956,17 +963,28 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
 {
        struct btrfs_root *quota_root;
+       struct btrfs_qgroup *qgroup;
        int ret = 0;
 
        quota_root = fs_info->quota_root;
        if (!quota_root)
                return -EINVAL;
 
+       /* check if there are no relations to this qgroup */
+       spin_lock(&fs_info->qgroup_lock);
+       qgroup = find_qgroup_rb(fs_info, qgroupid);
+       if (qgroup) {
+               if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
+                       spin_unlock(&fs_info->qgroup_lock);
+                       return -EBUSY;
+               }
+       }
+       spin_unlock(&fs_info->qgroup_lock);
+
        ret = del_qgroup_item(trans, quota_root, qgroupid);
 
        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(quota_root->fs_info, qgroupid);
-
        spin_unlock(&fs_info->qgroup_lock);
 
        return ret;
index 5445454..321b7fb 100644 (file)
@@ -1814,8 +1814,10 @@ static int name_cache_insert(struct send_ctx *sctx,
                        (unsigned long)nce->ino);
        if (!nce_head) {
                nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
-               if (!nce_head)
+               if (!nce_head) {
+                       kfree(nce);
                        return -ENOMEM;
+               }
                INIT_LIST_HEAD(nce_head);
 
                ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
index 99545df..d8982e9 100644 (file)
@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                             function, line, errstr);
                return;
        }
-       trans->transaction->aborted = errno;
+       ACCESS_ONCE(trans->transaction->aborted) = errno;
        __btrfs_std_error(root->fs_info, function, line, errno, NULL);
 }
 /*
index 87fac9a..f154946 100644 (file)
@@ -1468,7 +1468,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
        }
 
-       if (cur_trans->aborted) {
+       /* Stop the commit early if ->aborted is set */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                goto cleanup_transaction;
        }
@@ -1574,6 +1575,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);
 
+       /* ->aborted might be set after the previous check, so check it */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
+               goto cleanup_transaction;
+       }
        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
@@ -1657,6 +1663,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
        }
 
+       /*
+        * The tasks which save the space cache and inode cache may also
+        * update ->aborted, check it.
+        */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
+               mutex_unlock(&root->fs_info->tree_log_mutex);
+               mutex_unlock(&root->fs_info->reloc_mutex);
+               goto cleanup_transaction;
+       }
+
        btrfs_prepare_extent_commit(trans, root);
 
        cur_trans = root->fs_info->running_transaction;
index 83186c7..9027bb1 100644 (file)
@@ -3357,6 +3357,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
        if (skip_csum)
                return 0;
 
+       if (em->compress_type) {
+               csum_offset = 0;
+               csum_len = block_len;
+       }
+
        /* block start is already adjusted for the file extent offset. */
        ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
                                       em->block_start + csum_offset,
@@ -3410,13 +3415,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
                em = list_entry(extents.next, struct extent_map, list);
 
                list_del_init(&em->list);
-               clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
 
                /*
                 * If we had an error we just need to delete everybody from our
                 * private list.
                 */
                if (ret) {
+                       clear_em_logging(tree, em);
                        free_extent_map(em);
                        continue;
                }
@@ -3424,8 +3429,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
                write_unlock(&tree->lock);
 
                ret = log_one_extent(trans, inode, root, em, path);
-               free_extent_map(em);
                write_lock(&tree->lock);
+               clear_em_logging(tree, em);
+               free_extent_map(em);
        }
        WARN_ON(!list_empty(&extents));
        write_unlock(&tree->lock);
index 5cce6aa..15f6efd 100644 (file)
@@ -1431,7 +1431,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                }
        } else {
                ret = btrfs_get_bdev_and_sb(device_path,
-                                           FMODE_READ | FMODE_EXCL,
+                                           FMODE_WRITE | FMODE_EXCL,
                                            root->fs_info->bdev_holder, 0,
                                            &bdev, &bh);
                if (ret)
@@ -2614,7 +2614,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
        cache = btrfs_lookup_block_group(fs_info, chunk_offset);
        chunk_used = btrfs_block_group_used(&cache->item);
 
-       user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
+       if (bargs->usage == 0)
+               user_thresh = 0;
+       else if (bargs->usage > 100)
+               user_thresh = cache->key.offset;
+       else
+               user_thresh = div_factor_fine(cache->key.offset,
+                                             bargs->usage);
+
        if (chunk_used < user_thresh)
                ret = 0;
 
@@ -2959,6 +2966,8 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
        unset_balance_control(fs_info);
        ret = del_balance_item(fs_info->tree_root);
        BUG_ON(ret);
+
+       atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 }
 
 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
@@ -3138,8 +3147,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 out:
        if (bctl->flags & BTRFS_BALANCE_RESUME)
                __cancel_balance(fs_info);
-       else
+       else {
                kfree(bctl);
+               atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+       }
        return ret;
 }
 
@@ -3156,7 +3167,6 @@ static int balance_kthread(void *data)
                ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
-       atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
 
@@ -3179,7 +3189,6 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
                return 0;
        }
 
-       WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
        tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);
@@ -3233,6 +3242,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
+       WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
+
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
@@ -3496,7 +3507,7 @@ struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        { 1, 1, 2, 2, 2, 2 /* raid1 */ },
        { 1, 2, 1, 1, 1, 2 /* dup */ },
        { 1, 1, 0, 2, 1, 1 /* raid0 */ },
-       { 1, 1, 0, 1, 1, 1 /* single */ },
+       { 1, 1, 1, 1, 1, 1 /* single */ },
 };
 
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
index c017a2d..7a75c3e 100644 (file)
@@ -2935,6 +2935,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
                void *kaddr = kmap_atomic(bh->b_page);
                memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
                kunmap_atomic(kaddr);
+               flush_dcache_page(bh->b_page);
        }
 }
 
index ce5cbd7..210fce2 100644 (file)
@@ -226,6 +226,8 @@ compose_mount_options_out:
 compose_mount_options_err:
        kfree(mountdata);
        mountdata = ERR_PTR(rc);
+       kfree(*devname);
+       *devname = NULL;
        goto compose_mount_options_out;
 }
 
index 17c3643..12b3da3 100644 (file)
@@ -1917,7 +1917,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
        }
        case AF_INET6: {
                struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
-               struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs;
+               struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
                return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
        }
        default:
index 153bb1e..a5f12b7 100644 (file)
@@ -176,7 +176,7 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
                        opts->uid = uid;
                        break;
                case Opt_gid:
-                       if (match_octal(&args[0], &option))
+                       if (match_int(&args[0], &option))
                                return -EINVAL;
                        gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(gid))
index 18c45ca..20df02c 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -434,8 +434,9 @@ static int count(struct user_arg_ptr argv, int max)
                        if (IS_ERR(p))
                                return -EFAULT;
 
-                       if (i++ >= max)
+                       if (i >= max)
                                return -E2BIG;
+                       ++i;
 
                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
index e95b949..137af42 100644 (file)
@@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
                retval = f2fs_getxattr(inode, name_index, "", value, retval);
        }
 
-       if (retval < 0) {
-               if (retval == -ENODATA)
-                       acl = NULL;
-               else
-                       acl = ERR_PTR(retval);
-       } else {
+       if (retval > 0)
                acl = f2fs_acl_from_disk(value, retval);
-       }
+       else if (retval == -ENODATA)
+               acl = NULL;
+       else
+               acl = ERR_PTR(retval);
        kfree(value);
+
        if (!IS_ERR(acl))
                set_cached_acl(inode, type, acl);
 
index 6ef36c3..ff3c843 100644 (file)
@@ -214,7 +214,6 @@ retry:
                goto retry;
        }
        new->ino = ino;
-       INIT_LIST_HEAD(&new->list);
 
        /* add new_oentry into list which is sorted by inode number */
        if (orphan) {
@@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi)
        sbi->n_orphans = 0;
 }
 
-int create_checkpoint_caches(void)
+int __init create_checkpoint_caches(void)
 {
        orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
                        sizeof(struct orphan_inode_entry), NULL);
index 3aa5ce7..7bd22a2 100644 (file)
@@ -547,6 +547,15 @@ redirty_out:
 
 #define MAX_DESIRED_PAGES_WP   4096
 
+static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
+                       void *data)
+{
+       struct address_space *mapping = data;
+       int ret = mapping->a_ops->writepage(page, wbc);
+       mapping_set_error(mapping, ret);
+       return ret;
+}
+
 static int f2fs_write_data_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
@@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
        if (!S_ISDIR(inode->i_mode))
                mutex_lock(&sbi->writepages);
-       ret = generic_writepages(mapping, wbc);
+       ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
        if (!S_ISDIR(inode->i_mode))
                mutex_unlock(&sbi->writepages);
        f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
@@ -689,6 +698,11 @@ static int f2fs_set_data_page_dirty(struct page *page)
        return 0;
 }
 
+static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
+{
+       return generic_block_bmap(mapping, block, get_data_block_ro);
+}
+
 const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
@@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = {
        .invalidatepage = f2fs_invalidate_data_page,
        .releasepage    = f2fs_release_data_page,
        .direct_IO      = f2fs_direct_IO,
+       .bmap           = f2fs_bmap,
 };
index 0e0380a..c8c3730 100644 (file)
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(f2fs_stat_list);
 static struct dentry *debugfs_root;
+static DEFINE_MUTEX(f2fs_stat_mutex);
 
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
@@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v)
        int i = 0;
        int j;
 
+       mutex_lock(&f2fs_stat_mutex);
        list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
 
-               mutex_lock(&si->stat_lock);
-               if (!si->sbi) {
-                       mutex_unlock(&si->stat_lock);
-                       continue;
-               }
                update_general_status(si->sbi);
 
                seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
-               seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ",
-                          si->nat_area_segs, si->sit_area_segs);
+               seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
+                          si->sit_area_segs, si->nat_area_segs);
                seq_printf(s, "[SSA: %d] [MAIN: %d",
                           si->ssa_area_segs, si->main_area_segs);
                seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
@@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v)
                seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
                                (si->base_mem + si->cache_mem) >> 10,
                                si->base_mem >> 10, si->cache_mem >> 10);
-               mutex_unlock(&si->stat_lock);
        }
+       mutex_unlock(&f2fs_stat_mutex);
        return 0;
 }
 
@@ -303,7 +300,7 @@ static const struct file_operations stat_fops = {
        .release = single_release,
 };
 
-static int init_stats(struct f2fs_sb_info *sbi)
+int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_stat_info *si;
@@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi)
                return -ENOMEM;
 
        si = sbi->stat_info;
-       mutex_init(&si->stat_lock);
-       list_add_tail(&si->stat_list, &f2fs_stat_list);
-
        si->all_area_segs = le32_to_cpu(raw_super->segment_count);
        si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
        si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi)
        si->main_area_zones = si->main_area_sections /
                                le32_to_cpu(raw_super->secs_per_zone);
        si->sbi = sbi;
-       return 0;
-}
 
-int f2fs_build_stats(struct f2fs_sb_info *sbi)
-{
-       int retval;
-
-       retval = init_stats(sbi);
-       if (retval)
-               return retval;
-
-       if (!debugfs_root)
-               debugfs_root = debugfs_create_dir("f2fs", NULL);
+       mutex_lock(&f2fs_stat_mutex);
+       list_add_tail(&si->stat_list, &f2fs_stat_list);
+       mutex_unlock(&f2fs_stat_mutex);
 
-       debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops);
        return 0;
 }
 
@@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 {
        struct f2fs_stat_info *si = sbi->stat_info;
 
+       mutex_lock(&f2fs_stat_mutex);
        list_del(&si->stat_list);
-       mutex_lock(&si->stat_lock);
-       si->sbi = NULL;
-       mutex_unlock(&si->stat_lock);
+       mutex_unlock(&f2fs_stat_mutex);
+
        kfree(sbi->stat_info);
 }
 
-void destroy_root_stats(void)
+void __init f2fs_create_root_stats(void)
+{
+       debugfs_root = debugfs_create_dir("f2fs", NULL);
+       if (debugfs_root)
+               debugfs_create_file("status", S_IRUGO, debugfs_root,
+                                        NULL, &stat_fops);
+}
+
+void f2fs_destroy_root_stats(void)
 {
        debugfs_remove_recursive(debugfs_root);
        debugfs_root = NULL;
index 951ed52..989980e 100644 (file)
@@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
        }
 
        if (inode) {
-               inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+               inode->i_ctime = CURRENT_TIME;
                drop_nlink(inode);
                if (S_ISDIR(inode->i_mode)) {
                        drop_nlink(inode);
index 13c6dfb..c8e2d75 100644 (file)
@@ -211,11 +211,11 @@ struct dnode_of_data {
 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
                struct page *ipage, struct page *npage, nid_t nid)
 {
+       memset(dn, 0, sizeof(*dn));
        dn->inode = inode;
        dn->inode_page = ipage;
        dn->node_page = npage;
        dn->nid = nid;
-       dn->inode_page_locked = 0;
 }
 
 /*
@@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *);
  * super.c
  */
 int f2fs_sync_fs(struct super_block *, int);
+extern __printf(3, 4)
+void f2fs_msg(struct super_block *, const char *, const char *, ...);
 
 /*
  * hash.c
@@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int,
 void flush_nat_entries(struct f2fs_sb_info *);
 int build_node_manager(struct f2fs_sb_info *);
 void destroy_node_manager(struct f2fs_sb_info *);
-int create_node_manager_caches(void);
+int __init create_node_manager_caches(void);
 void destroy_node_manager_caches(void);
 
 /*
@@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *);
 void block_operations(struct f2fs_sb_info *);
 void write_checkpoint(struct f2fs_sb_info *, bool, bool);
 void init_orphan_info(struct f2fs_sb_info *);
-int create_checkpoint_caches(void);
+int __init create_checkpoint_caches(void);
 void destroy_checkpoint_caches(void);
 
 /*
@@ -984,9 +986,9 @@ int do_write_data_page(struct page *);
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
 block_t start_bidx_of_node(unsigned int);
-int f2fs_gc(struct f2fs_sb_info *, int);
+int f2fs_gc(struct f2fs_sb_info *);
 void build_gc_manager(struct f2fs_sb_info *);
-int create_gc_caches(void);
+int __init create_gc_caches(void);
 void destroy_gc_caches(void);
 
 /*
@@ -1058,7 +1060,8 @@ struct f2fs_stat_info {
 
 int f2fs_build_stats(struct f2fs_sb_info *);
 void f2fs_destroy_stats(struct f2fs_sb_info *);
-void destroy_root_stats(void);
+void __init f2fs_create_root_stats(void);
+void f2fs_destroy_root_stats(void);
 #else
 #define stat_inc_call_count(si)
 #define stat_inc_seg_count(si, type)
@@ -1068,7 +1071,8 @@ void destroy_root_stats(void);
 
 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline void destroy_root_stats(void) { }
+static inline void __init f2fs_create_root_stats(void) { }
+static inline void f2fs_destroy_root_stats(void) { }
 #endif
 
 extern const struct file_operations f2fs_dir_operations;
index 7f9ea92..3191b52 100644 (file)
@@ -96,8 +96,9 @@ out:
 }
 
 static const struct vm_operations_struct f2fs_file_vm_ops = {
-       .fault        = filemap_fault,
-       .page_mkwrite = f2fs_vm_page_mkwrite,
+       .fault          = filemap_fault,
+       .page_mkwrite   = f2fs_vm_page_mkwrite,
+       .remap_pages    = generic_file_remap_pages,
 };
 
 static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
@@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        if (ret)
                return ret;
 
+       /* guarantee free sections for fsync */
+       f2fs_balance_fs(sbi);
+
        mutex_lock(&inode->i_mutex);
 
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
@@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
                struct dnode_of_data dn;
                struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
+               f2fs_balance_fs(sbi);
+
                mutex_lock_op(sbi, DATA_TRUNC);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, index, RDONLY_NODE);
@@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t len)
 {
        struct inode *inode = file->f_path.dentry->d_inode;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        long ret;
 
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode,
        else
                ret = expand_inode_data(inode, offset, len, mode);
 
-       f2fs_balance_fs(sbi);
+       if (!ret) {
+               inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+               mark_inode_dirty(inode);
+       }
        return ret;
 }
 
index b0ec721..c386910 100644 (file)
@@ -78,7 +78,7 @@ static int gc_thread_func(void *data)
 
                sbi->bg_gc++;
 
-               if (f2fs_gc(sbi, 1) == GC_NONE)
+               if (f2fs_gc(sbi) == GC_NONE)
                        wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
                else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
                        wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -424,7 +424,11 @@ next_step:
 }
 
 /*
- * Calculate start block index that this node page contains
+ * Calculate start block index indicating the given node offset.
+ * Be careful, caller should give this node offset only indicating direct node
+ * blocks. If any node offsets, which point the other types of node blocks such
+ * as indirect or double indirect node blocks, are given, it must be a caller's
+ * bug.
  */
 block_t start_bidx_of_node(unsigned int node_ofs)
 {
@@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
        return ret;
 }
 
-int f2fs_gc(struct f2fs_sb_info *sbi, int nGC)
+int f2fs_gc(struct f2fs_sb_info *sbi)
 {
-       unsigned int segno;
-       int old_free_secs, cur_free_secs;
-       int gc_status, nfree;
        struct list_head ilist;
+       unsigned int segno, i;
        int gc_type = BG_GC;
+       int gc_status = GC_NONE;
 
        INIT_LIST_HEAD(&ilist);
 gc_more:
-       nfree = 0;
-       gc_status = GC_NONE;
+       if (!(sbi->sb->s_flags & MS_ACTIVE))
+               goto stop;
 
        if (has_not_enough_free_secs(sbi))
-               old_free_secs = reserved_sections(sbi);
-       else
-               old_free_secs = free_sections(sbi);
-
-       while (sbi->sb->s_flags & MS_ACTIVE) {
-               int i;
-               if (has_not_enough_free_secs(sbi))
-                       gc_type = FG_GC;
+               gc_type = FG_GC;
 
-               cur_free_secs = free_sections(sbi) + nfree;
+       if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+               goto stop;
 
-               /* We got free space successfully. */
-               if (nGC < cur_free_secs - old_free_secs)
-                       break;
-
-               if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+       for (i = 0; i < sbi->segs_per_sec; i++) {
+               /*
+                * do_garbage_collect will give us three gc_status:
+                * GC_ERROR, GC_DONE, and GC_BLOCKED.
+                * If GC is finished uncleanly, we have to return
+                * the victim to dirty segment list.
+                */
+               gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+               if (gc_status != GC_DONE)
                        break;
-
-               for (i = 0; i < sbi->segs_per_sec; i++) {
-                       /*
-                        * do_garbage_collect will give us three gc_status:
-                        * GC_ERROR, GC_DONE, and GC_BLOCKED.
-                        * If GC is finished uncleanly, we have to return
-                        * the victim to dirty segment list.
-                        */
-                       gc_status = do_garbage_collect(sbi, segno + i,
-                                       &ilist, gc_type);
-                       if (gc_status != GC_DONE)
-                               goto stop;
-                       nfree++;
-               }
        }
-stop:
-       if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
+       if (has_not_enough_free_secs(sbi)) {
                write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
-               if (nfree)
+               if (has_not_enough_free_secs(sbi))
                        goto gc_more;
        }
+stop:
        mutex_unlock(&sbi->gc_mutex);
 
        put_gc_inode(&ilist);
-       BUG_ON(!list_empty(&ilist));
        return gc_status;
 }
 
@@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
        DIRTY_I(sbi)->v_ops = &default_v_ops;
 }
 
-int create_gc_caches(void)
+int __init create_gc_caches(void)
 {
        winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
                        sizeof(struct inode_entry), NULL);
index bf20b4d..7942417 100644 (file)
@@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;
 
+       if (wbc)
+               f2fs_balance_fs(sbi);
+
        node_page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);
index 5066bfd..9bda63c 100644 (file)
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
        return 0;
 }
 
+/*
+ * It is very important to gather dirty pages and write at once, so that we can
+ * submit a big bio without interfering other data writes.
+ * Be default, 512 pages (2MB), a segment size, is quite reasonable.
+ */
+#define COLLECT_DIRTY_NODES    512
 static int f2fs_write_node_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
        struct block_device *bdev = sbi->sb->s_bdev;
        long nr_to_write = wbc->nr_to_write;
 
-       if (wbc->for_kupdate)
-               return 0;
-
-       if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
-               return 0;
-
+       /* First check balancing cached NAT entries */
        if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
                write_checkpoint(sbi, false, false);
                return 0;
        }
 
+       /* collect a number of dirty node pages and write together */
+       if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
+               return 0;
+
        /* if mounting is failed, skip writing node pages */
        wbc->nr_to_write = bio_get_nr_vecs(bdev);
        sync_node_pages(sbi, 0, wbc);
@@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
        kfree(nm_i);
 }
 
-int create_node_manager_caches(void)
+int __init create_node_manager_caches(void)
 {
        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
                        sizeof(struct nat_entry), NULL);
index b571fee..f42e406 100644 (file)
@@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
                kunmap(page);
                f2fs_put_page(page, 0);
        } else {
-               f2fs_add_link(&dent, inode);
+               err = f2fs_add_link(&dent, inode);
        }
        iput(dir);
 out:
@@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
                                goto out;
                        }
 
-                       INIT_LIST_HEAD(&entry->list);
                        list_add_tail(&entry->list, head);
                        entry->blkaddr = blkaddr;
                }
@@ -174,10 +173,9 @@ out:
 static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
                                        struct list_head *head)
 {
-       struct list_head *this;
-       struct fsync_inode_entry *entry;
-       list_for_each(this, head) {
-               entry = list_entry(this, struct fsync_inode_entry, list);
+       struct fsync_inode_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, head, list) {
                iput(entry->inode);
                list_del(&entry->list);
                kmem_cache_free(fsync_entry_slab, entry);
index de62409..4b00990 100644 (file)
@@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
         */
        if (has_not_enough_free_secs(sbi)) {
                mutex_lock(&sbi->gc_mutex);
-               f2fs_gc(sbi, 1);
+               f2fs_gc(sbi);
        }
 }
 
index 08a94c8..37fad04 100644 (file)
@@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = {
        {Opt_err, NULL},
 };
 
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+       va_end(args);
+}
+
 static void init_once(void *foo)
 {
        struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
@@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
        if (sync)
                write_checkpoint(sbi, false, false);
+       else
+               f2fs_balance_fs(sbi);
 
        return 0;
 }
@@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = {
        .get_parent = f2fs_get_parent,
 };
 
-static int parse_options(struct f2fs_sb_info *sbi, char *options)
+static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
+                               char *options)
 {
        substring_t args[MAX_OPT_ARGS];
        char *p;
@@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
                        break;
 #else
                case Opt_nouser_xattr:
-                       pr_info("nouser_xattr options not supported\n");
+                       f2fs_msg(sb, KERN_INFO,
+                               "nouser_xattr options not supported");
                        break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
                        break;
 #else
                case Opt_noacl:
-                       pr_info("noacl options not supported\n");
+                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
                        break;
 #endif
                case Opt_active_logs:
@@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
                        set_opt(sbi, DISABLE_EXT_IDENTIFY);
                        break;
                default:
-                       pr_err("Unrecognized mount option \"%s\" or missing value\n",
-                                       p);
+                       f2fs_msg(sb, KERN_ERR,
+                               "Unrecognized mount option \"%s\" or missing value",
+                               p);
                        return -EINVAL;
                }
        }
@@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits)
        return result;
 }
 
-static int sanity_check_raw_super(struct f2fs_super_block *raw_super)
+static int sanity_check_raw_super(struct super_block *sb,
+                       struct f2fs_super_block *raw_super)
 {
        unsigned int blocksize;
 
-       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic))
+       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Magic Mismatch, valid(0x%x) - read(0x%x)",
+                       F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
                return 1;
+       }
 
        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
-       if (blocksize != PAGE_CACHE_SIZE)
+       if (blocksize != PAGE_CACHE_SIZE) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid blocksize (%u), supports only 4KB\n",
+                       blocksize);
                return 1;
+       }
        if (le32_to_cpu(raw_super->log_sectorsize) !=
-                                       F2FS_LOG_SECTOR_SIZE)
+                                       F2FS_LOG_SECTOR_SIZE) {
+               f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
                return 1;
+       }
        if (le32_to_cpu(raw_super->log_sectors_per_block) !=
-                                       F2FS_LOG_SECTORS_PER_BLOCK)
+                                       F2FS_LOG_SECTORS_PER_BLOCK) {
+               f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
                return 1;
+       }
        return 0;
 }
 
@@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        if (!sbi)
                return -ENOMEM;
 
-       /* set a temporary block size */
-       if (!sb_set_blocksize(sb, F2FS_BLKSIZE))
+       /* set a block size */
+       if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
+               f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
                goto free_sbi;
+       }
 
        /* read f2fs raw super block */
        raw_super_buf = sb_bread(sb, 0);
        if (!raw_super_buf) {
                err = -EIO;
+               f2fs_msg(sb, KERN_ERR, "unable to read superblock");
                goto free_sbi;
        }
        raw_super = (struct f2fs_super_block *)
@@ -438,12 +471,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        set_opt(sbi, POSIX_ACL);
 #endif
        /* parse mount options */
-       if (parse_options(sbi, (char *)data))
+       if (parse_options(sb, sbi, (char *)data))
                goto free_sb_buf;
 
        /* sanity checking of raw super */
-       if (sanity_check_raw_super(raw_super))
+       if (sanity_check_raw_super(sb, raw_super)) {
+               f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
                goto free_sb_buf;
+       }
 
        sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
        sb->s_max_links = F2FS_LINK_MAX;
@@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
+               f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
                err = PTR_ERR(sbi->meta_inode);
                goto free_sb_buf;
        }
 
        err = get_valid_checkpoint(sbi);
-       if (err)
+       if (err) {
+               f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
                goto free_meta_inode;
+       }
 
        /* sanity checking of checkpoint */
        err = -EINVAL;
-       if (sanity_check_ckpt(raw_super, sbi->ckpt))
+       if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
+               f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
                goto free_cp;
+       }
 
        sbi->total_valid_node_count =
                                le32_to_cpu(sbi->ckpt->valid_node_count);
@@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        INIT_LIST_HEAD(&sbi->dir_inode_list);
        spin_lock_init(&sbi->dir_inode_lock);
 
-       /* init super block */
-       if (!sb_set_blocksize(sb, sbi->blocksize))
-               goto free_cp;
-
        init_orphan_info(sbi);
 
        /* setup f2fs internal modules */
        err = build_segment_manager(sbi);
-       if (err)
+       if (err) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Failed to initialize F2FS segment manager");
                goto free_sm;
+       }
        err = build_node_manager(sbi);
-       if (err)
+       if (err) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Failed to initialize F2FS node manager");
                goto free_nm;
+       }
 
        build_gc_manager(sbi);
 
        /* get an inode for node space */
        sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
        if (IS_ERR(sbi->node_inode)) {
+               f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
                err = PTR_ERR(sbi->node_inode);
                goto free_nm;
        }
@@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
+               f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
                err = PTR_ERR(root);
                goto free_node_inode;
        }
@@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static int init_inodecache(void)
+static int __init init_inodecache(void)
 {
        f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
                        sizeof(struct f2fs_inode_info), NULL);
@@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void)
        err = create_checkpoint_caches();
        if (err)
                goto fail;
-       return register_filesystem(&f2fs_fs_type);
+       err = register_filesystem(&f2fs_fs_type);
+       if (err)
+               goto fail;
+       f2fs_create_root_stats();
 fail:
        return err;
 }
 
 static void __exit exit_f2fs_fs(void)
 {
-       destroy_root_stats();
+       f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        destroy_checkpoint_caches();
        destroy_gc_caches();
index 940136a..8038c04 100644 (file)
@@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        if (name_len > 255 || value_len > MAX_VALUE_LEN)
                return -ERANGE;
 
+       f2fs_balance_fs(sbi);
+
        mutex_lock_op(sbi, NODE_NEW);
        if (!fi->i_xattr_nid) {
                /* Allocate new attribute block */
index 0cf160a..1b2f6c2 100644 (file)
@@ -4,12 +4,24 @@ config FUSE_FS
          With FUSE it is possible to implement a fully functional filesystem
          in a userspace program.
 
-         There's also companion library: libfuse.  This library along with
-         utilities is available from the FUSE homepage:
+         There's also a companion library: libfuse2.  This library is available
+         from the FUSE homepage:
          <http://fuse.sourceforge.net/>
+         although chances are your distribution already has that library
+         installed if you've installed the "fuse" package itself.
 
          See <file:Documentation/filesystems/fuse.txt> for more information.
          See <file:Documentation/Changes> for needed library/utility version.
 
          If you want to develop a userspace FS, or if you want to use
          a filesystem based on FUSE, answer Y or M.
+
+config CUSE
+       tristate "Character device in Userspace support"
+       depends on FUSE_FS
+       help
+         This FUSE extension allows character devices to be
+         implemented in userspace.
+
+         If you want to develop or use a userspace character device
+         based on CUSE, answer Y or M.
index ee8d550..e397b67 100644 (file)
@@ -45,7 +45,6 @@
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/stat.h>
 #include <linux/module.h>
 
@@ -63,7 +62,7 @@ struct cuse_conn {
        bool                    unrestricted_ioctl;
 };
 
-static DEFINE_SPINLOCK(cuse_lock);             /* protects cuse_conntbl */
+static DEFINE_MUTEX(cuse_lock);                /* protects registration */
 static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
 static struct class *cuse_class;
 
@@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file)
        int rc;
 
        /* look up and get the connection */
-       spin_lock(&cuse_lock);
+       mutex_lock(&cuse_lock);
        list_for_each_entry(pos, cuse_conntbl_head(devt), list)
                if (pos->dev->devt == devt) {
                        fuse_conn_get(&pos->fc);
                        cc = pos;
                        break;
                }
-       spin_unlock(&cuse_lock);
+       mutex_unlock(&cuse_lock);
 
        /* dead? */
        if (!cc)
@@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
 static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
 {
        char *end = p + len;
-       char *key, *val;
+       char *uninitialized_var(key), *uninitialized_var(val);
        int rc;
 
        while (true) {
@@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev)
  */
 static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 {
-       struct cuse_conn *cc = fc_to_cc(fc);
+       struct cuse_conn *cc = fc_to_cc(fc), *pos;
        struct cuse_init_out *arg = req->out.args[0].value;
        struct page *page = req->pages[0];
        struct cuse_devinfo devinfo = { };
        struct device *dev;
        struct cdev *cdev;
        dev_t devt;
-       int rc;
+       int rc, i;
 
        if (req->out.h.error ||
            arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
@@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
        dev_set_drvdata(dev, cc);
        dev_set_name(dev, "%s", devinfo.name);
 
+       mutex_lock(&cuse_lock);
+
+       /* make sure the device-name is unique */
+       for (i = 0; i < CUSE_CONNTBL_LEN; ++i) {
+               list_for_each_entry(pos, &cuse_conntbl[i], list)
+                       if (!strcmp(dev_name(pos->dev), dev_name(dev)))
+                               goto err_unlock;
+       }
+
        rc = device_add(dev);
        if (rc)
-               goto err_device;
+               goto err_unlock;
 
        /* register cdev */
        rc = -ENOMEM;
        cdev = cdev_alloc();
        if (!cdev)
-               goto err_device;
+               goto err_unlock;
 
        cdev->owner = THIS_MODULE;
        cdev->ops = &cuse_frontend_fops;
@@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
        cc->cdev = cdev;
 
        /* make the device available */
-       spin_lock(&cuse_lock);
        list_add(&cc->list, cuse_conntbl_head(devt));
-       spin_unlock(&cuse_lock);
+       mutex_unlock(&cuse_lock);
 
        /* announce device availability */
        dev_set_uevent_suppress(dev, 0);
@@ -391,7 +398,8 @@ out:
 
 err_cdev:
        cdev_del(cdev);
-err_device:
+err_unlock:
+       mutex_unlock(&cuse_lock);
        put_device(dev);
 err_region:
        unregister_chrdev_region(devt, 1);
@@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
        int rc;
 
        /* remove from the conntbl, no more access from this point on */
-       spin_lock(&cuse_lock);
+       mutex_lock(&cuse_lock);
        list_del_init(&cc->list);
-       spin_unlock(&cuse_lock);
+       mutex_unlock(&cuse_lock);
 
        /* remove device */
        if (cc->dev)
index c163353..e83351a 100644 (file)
@@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;
-       struct address_space *mapping;
-       pgoff_t index;
 
        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
@@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;
 
-       mapping = oldpage->mapping;
-       index = oldpage->index;
-
        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
index e21d4d8..f3ab824 100644 (file)
@@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
-long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
-                           loff_t length)
+static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+                               loff_t length)
 {
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
@@ -2213,7 +2213,6 @@ long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(fuse_file_fallocate);
 
 static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
index a286233..81cc7ea 100644 (file)
@@ -446,7 +446,8 @@ int __log_start_commit(journal_t *journal, tid_t target)
         * currently running transaction (if it exists).  Otherwise,
         * the target tid must be an old one.
         */
-       if (journal->j_running_transaction &&
+       if (journal->j_commit_request != target &&
+           journal->j_running_transaction &&
            journal->j_running_transaction->t_tid == target) {
                /*
                 * We want a new commit: OK, mark the request and wakeup the
index 9d863fb..f2bc3df 100644 (file)
@@ -296,7 +296,7 @@ EXPORT_SYMBOL(seq_read);
  *     seq_lseek -     ->llseek() method for sequential files.
  *     @file: the file in question
  *     @offset: new position
- *     @origin: 0 for absolute, 1 for relative position
+ *     @whence: 0 for absolute, 1 for relative position
  *
  *     Ready-made ->f_op->llseek()
  */
index d44fb56..e9be396 100644 (file)
@@ -307,7 +307,8 @@ static void udf_sb_free_partitions(struct super_block *sb)
 {
        struct udf_sb_info *sbi = UDF_SB(sb);
        int i;
-
+       if (sbi->s_partmaps == NULL)
+               return;
        for (i = 0; i < sbi->s_partitions; i++)
                udf_free_partition(&sbi->s_partmaps[i]);
        kfree(sbi->s_partmaps);
index 26673a0..56d1614 100644 (file)
@@ -175,7 +175,7 @@ xfs_buf_get_maps(
        bp->b_map_count = map_count;
 
        if (map_count == 1) {
-               bp->b_maps = &bp->b_map;
+               bp->b_maps = &bp->__b_map;
                return 0;
        }
 
@@ -193,7 +193,7 @@ static void
 xfs_buf_free_maps(
        struct xfs_buf  *bp)
 {
-       if (bp->b_maps != &bp->b_map) {
+       if (bp->b_maps != &bp->__b_map) {
                kmem_free(bp->b_maps);
                bp->b_maps = NULL;
        }
@@ -377,8 +377,8 @@ xfs_buf_allocate_memory(
        }
 
 use_alloc_page:
-       start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
-       end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
+       start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
+       end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
        page_count = end - start;
        error = _xfs_buf_get_pages(bp, page_count, flags);
@@ -640,7 +640,7 @@ _xfs_buf_read(
        xfs_buf_flags_t         flags)
 {
        ASSERT(!(flags & XBF_WRITE));
-       ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
+       ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 
        bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
        bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
@@ -1709,7 +1709,7 @@ xfs_buf_cmp(
        struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
        xfs_daddr_t             diff;
 
-       diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
+       diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
        if (diff < 0)
                return -1;
        if (diff > 0)
index 23f5642..433a12e 100644 (file)
@@ -151,7 +151,7 @@ typedef struct xfs_buf {
        struct page             **b_pages;      /* array of page pointers */
        struct page             *b_page_array[XB_PAGES]; /* inline pages */
        struct xfs_buf_map      *b_maps;        /* compound buffer map */
-       struct xfs_buf_map      b_map;          /* inline compound buffer map */
+       struct xfs_buf_map      __b_map;        /* inline compound buffer map */
        int                     b_map_count;
        int                     b_io_length;    /* IO size in BBs */
        atomic_t                b_pin_count;    /* pin count */
@@ -330,8 +330,8 @@ void xfs_buf_stale(struct xfs_buf *bp);
  * In future, uncached buffers will pass the block number directly to the io
  * request function and hence these macros will go away at that point.
  */
-#define XFS_BUF_ADDR(bp)               ((bp)->b_map.bm_bn)
-#define XFS_BUF_SET_ADDR(bp, bno)      ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_ADDR(bp)               ((bp)->b_maps[0].bm_bn)
+#define XFS_BUF_SET_ADDR(bp, bno)      ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
 
 static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
 {
index becf4a9..77b0975 100644 (file)
@@ -71,7 +71,7 @@ xfs_buf_item_log_debug(
                chunk_num = byte >> XFS_BLF_SHIFT;
                word_num = chunk_num >> BIT_TO_WORD_SHIFT;
                bit_num = chunk_num & (NBWORD - 1);
-               wordp = &(bip->bli_format.blf_data_map[word_num]);
+               wordp = &(bip->__bli_format.blf_data_map[word_num]);
                bit_set = *wordp & (1 << bit_num);
                ASSERT(bit_set);
                byte++;
@@ -237,7 +237,7 @@ xfs_buf_item_size(
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                return bip->bli_format_count;
        }
 
@@ -278,7 +278,7 @@ xfs_buf_item_format_segment(
        uint            buffer_offset;
 
        /* copy the flags across from the base format item */
-       blfp->blf_flags = bip->bli_format.blf_flags;
+       blfp->blf_flags = bip->__bli_format.blf_flags;
 
        /*
         * Base size is the actual size of the ondisk structure - it reflects
@@ -287,6 +287,17 @@ xfs_buf_item_format_segment(
         */
        base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
+
+       nvecs = 0;
+       first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+       if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
+               /*
+                * If the map is not be dirty in the transaction, mark
+                * the size as zero and do not advance the vector pointer.
+                */
+               goto out;
+       }
+
        vecp->i_addr = blfp;
        vecp->i_len = base_size;
        vecp->i_type = XLOG_REG_TYPE_BFORMAT;
@@ -301,15 +312,13 @@ xfs_buf_item_format_segment(
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
-               blfp->blf_size = nvecs;
-               return vecp;
+               goto out;
        }
 
        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
-       first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
-       ASSERT(first_bit != -1);
+
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
@@ -371,7 +380,8 @@ xfs_buf_item_format_segment(
                        nbits++;
                }
        }
-       bip->bli_format.blf_size = nvecs;
+out:
+       blfp->blf_size = nvecs;
        return vecp;
 }
 
@@ -405,7 +415,7 @@ xfs_buf_item_format(
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
-                       bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
+                       bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }
 
@@ -485,7 +495,7 @@ xfs_buf_item_unpin(
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(XFS_BUF_ISSTALE(bp));
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 
                trace_xfs_buf_item_unpin_stale(bip);
 
@@ -601,7 +611,7 @@ xfs_buf_item_unlock(
 {
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
-       int                     aborted;
+       int                     aborted, clean, i;
        uint                    hold;
 
        /* Clear the buffer's association with this transaction. */
@@ -631,7 +641,7 @@ xfs_buf_item_unlock(
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                trace_xfs_buf_item_unlock_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                if (!aborted) {
                        atomic_dec(&bip->bli_refcount);
                        return;
@@ -644,8 +654,15 @@ xfs_buf_item_unlock(
         * If the buf item isn't tracking any data, free it, otherwise drop the
         * reference we hold to it.
         */
-       if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
-                            bip->bli_format.blf_map_size))
+       clean = 1;
+       for (i = 0; i < bip->bli_format_count; i++) {
+               if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
+                            bip->bli_formats[i].blf_map_size)) {
+                       clean = 0;
+                       break;
+               }
+       }
+       if (clean)
                xfs_buf_item_relse(bp);
        else
                atomic_dec(&bip->bli_refcount);
@@ -716,7 +733,7 @@ xfs_buf_item_get_format(
        bip->bli_format_count = count;
 
        if (count == 1) {
-               bip->bli_formats = &bip->bli_format;
+               bip->bli_formats = &bip->__bli_format;
                return 0;
        }
 
@@ -731,7 +748,7 @@ STATIC void
 xfs_buf_item_free_format(
        struct xfs_buf_log_item *bip)
 {
-       if (bip->bli_formats != &bip->bli_format) {
+       if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
index 6850f49..16def43 100644 (file)
@@ -104,7 +104,7 @@ typedef struct xfs_buf_log_item {
 #endif
        int                     bli_format_count;       /* count of headers */
        struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
-       struct xfs_buf_log_format bli_format;   /* embedded in-log header */
+       struct xfs_buf_log_format __bli_format; /* embedded in-log header */
 } xfs_buf_log_item_t;
 
 void   xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
index 7536faa..12afe07 100644 (file)
@@ -355,10 +355,12 @@ xfs_dir2_block_addname(
        /*
         * If need to compact the leaf entries, do it now.
         */
-       if (compact)
+       if (compact) {
                xfs_dir2_block_compact(tp, bp, hdr, btp, blp, &needlog,
                                      &lfloghigh, &lfloglow);
-       else if (btp->stale) {
+               /* recalculate blp post-compaction */
+               blp = xfs_dir2_block_leaf_p(btp);
+       } else if (btp->stale) {
                /*
                 * Set leaf logging boundaries to impossible state.
                 * For the no-stale case they're set explicitly.
index 5f53e75..8a59f85 100644 (file)
@@ -784,11 +784,11 @@ xfs_qm_scall_getquota(
             (XFS_IS_OQUOTA_ENFORCED(mp) &&
                        (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
            dst->d_id != 0) {
-               if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) &&
+               if ((dst->d_bcount > dst->d_blk_softlimit) &&
                    (dst->d_blk_softlimit > 0)) {
                        ASSERT(dst->d_btimer != 0);
                }
-               if (((int) dst->d_icount > (int) dst->d_ino_softlimit) &&
+               if ((dst->d_icount > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
                        ASSERT(dst->d_itimer != 0);
                }
index 4fc17d4..3edf5db 100644 (file)
@@ -93,7 +93,7 @@ _xfs_trans_bjoin(
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = bp->b_fspriv;
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;
@@ -432,7 +432,7 @@ xfs_trans_brelse(xfs_trans_t        *tp,
        bip = bp->b_fspriv;
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
        trace_xfs_trans_brelse(bip);
@@ -519,7 +519,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
        bip->bli_flags |= XFS_BLI_HOLD;
@@ -539,7 +539,7 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);
 
@@ -598,7 +598,7 @@ xfs_trans_log_buf(xfs_trans_t       *tp,
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(XFS_BUF_ISSTALE(bp));
                XFS_BUF_UNSTALE(bp);
-               bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
+               bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
 
        tp->t_flags |= XFS_TRANS_DIRTY;
@@ -643,6 +643,7 @@ xfs_trans_binval(
        xfs_buf_t       *bp)
 {
        xfs_buf_log_item_t      *bip = bp->b_fspriv;
+       int                     i;
 
        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
@@ -657,8 +658,8 @@ xfs_trans_binval(
                 */
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
-               ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
@@ -668,10 +669,12 @@ xfs_trans_binval(
 
        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
-       bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
-       bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
-       memset((char *)(bip->bli_format.blf_data_map), 0,
-             (bip->bli_format.blf_map_size * sizeof(uint)));
+       bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
+       bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
+       for (i = 0; i < bip->bli_format_count; i++) {
+               memset(bip->bli_formats[i].blf_data_map, 0,
+                      (bip->bli_formats[i].blf_map_size * sizeof(uint)));
+       }
        bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
        tp->t_flags |= XFS_TRANS_DIRTY;
 }
@@ -775,5 +778,5 @@ xfs_trans_dquot_buf(
               type == XFS_BLF_GDQUOT_BUF);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
-       bip->bli_format.blf_flags |= type;
+       bip->__bli_format.blf_flags |= type;
 }
index ccf7b4f..6c32af9 100644 (file)
@@ -16,6 +16,22 @@ extern void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                    dma_addr_t dma_handle);
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *cpu_addr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
index 701beab..5cf680a 100644 (file)
@@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn)
        return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
 }
 
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
-       return page_to_pfn(ZERO_PAGE(addr));
-}
+#define my_zero_pfn(addr)      page_to_pfn(ZERO_PAGE(addr))
+
 #else
 static inline int is_zero_pfn(unsigned long pfn)
 {
index 58f466f..1db51b8 100644 (file)
@@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
                        unsigned long fd, off_t pgoff);
 #endif
 
+#ifndef CONFIG_GENERIC_SIGALTSTACK
 #ifndef sys_sigaltstack
 asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
                        struct pt_regs *);
 #endif
+#endif
 
 #ifndef sys_rt_sigreturn
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
index 0f4a366..3527fb3 100644 (file)
@@ -70,7 +70,7 @@ struct drm_mm {
        unsigned long scan_color;
        unsigned long scan_size;
        unsigned long scan_hit_start;
-       unsigned scan_hit_size;
+       unsigned long scan_hit_end;
        unsigned scanned_blocks;
        unsigned long scan_start;
        unsigned long scan_end;
index 408da95..8f7a3d6 100644 (file)
@@ -297,10 +297,12 @@ enum {
        ATA_LOG_SATA_NCQ        = 0x10,
        ATA_LOG_SATA_ID_DEV_DATA  = 0x30,
        ATA_LOG_SATA_SETTINGS     = 0x08,
-       ATA_LOG_DEVSLP_MDAT       = 0x30,
+       ATA_LOG_DEVSLP_OFFSET     = 0x30,
+       ATA_LOG_DEVSLP_SIZE       = 0x08,
+       ATA_LOG_DEVSLP_MDAT       = 0x00,
        ATA_LOG_DEVSLP_MDAT_MASK  = 0x1F,
-       ATA_LOG_DEVSLP_DETO       = 0x31,
-       ATA_LOG_DEVSLP_VALID      = 0x37,
+       ATA_LOG_DEVSLP_DETO       = 0x01,
+       ATA_LOG_DEVSLP_VALID      = 0x07,
        ATA_LOG_DEVSLP_VALID_MASK = 0x80,
 
        /* READ/WRITE LONG (obsolete) */
index bce729a..5a6d718 100644 (file)
@@ -24,6 +24,7 @@
 #define _LINUX_AUDIT_H_
 
 #include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
 
 struct audit_sig_info {
@@ -157,7 +158,8 @@ void audit_core_dumps(long signr);
 
 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
 {
-       if (unlikely(!audit_dummy_context()))
+       /* Force a record to be reported if a signal was delivered. */
+       if (signr || unlikely(!audit_dummy_context()))
                __audit_seccomp(syscall, signr, code);
 }
 
index 6ecb6dc..cc7bdde 100644 (file)
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *mask,
-                       bool sync, bool *contended, struct page **page);
+                       bool sync, bool *contended);
 extern int compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
-                       bool sync, bool *contended, struct page **page)
+                       bool sync, bool *contended)
 {
        return COMPACT_CONTINUE;
 }
index ac3bbb5..1739510 100644 (file)
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/kref.h>
 
 /**
  * struct cpu_rmap - CPU affinity reverse-map
+ * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
  * @used: Number of objects added
  * @obj: Pointer to array of object pointers
@@ -23,6 +25,7 @@
  *      based on affinity masks
  */
 struct cpu_rmap {
+       struct kref     refcount;
        u16             size, used;
        void            **obj;
        struct {
@@ -33,15 +36,7 @@ struct cpu_rmap {
 #define CPU_RMAP_DIST_INF 0xffff
 
 extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
-
-/**
- * free_cpu_rmap - free CPU affinity reverse-map
- * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
- */
-static inline void free_cpu_rmap(struct cpu_rmap *rmap)
-{
-       kfree(rmap);
-}
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
 
 extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
 extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
index 3711b34..24cd103 100644 (file)
@@ -126,9 +126,9 @@ struct cpuidle_driver {
        struct module           *owner;
        int                     refcnt;
 
-       unsigned int            power_specified:1;
        /* set to 1 to use the core cpuidle time keeping (for all states). */
        unsigned int            en_core_tk_irqen:1;
+       /* states array must be ordered in decreasing power consumption */
        struct cpuidle_state    states[CPUIDLE_STATE_MAX];
        int                     state_count;
        int                     safe_state_index;
index e73b852..df77ba9 100644 (file)
@@ -325,14 +325,28 @@ struct hv_ring_buffer {
 
        u32 interrupt_mask;
 
-       /* Pad it to PAGE_SIZE so that data starts on page boundary */
-       u8      reserved[4084];
-
-       /* NOTE:
-        * The interrupt_mask field is used only for channels but since our
-        * vmbus connection also uses this data structure and its data starts
-        * here, we commented out this field.
+       /*
+        * Win8 uses some of the reserved bits to implement
+        * interrupt driven flow management. On the send side
+        * we can request that the receiver interrupt the sender
+        * when the ring transitions from being full to being able
+        * to handle a message of size "pending_send_sz".
+        *
+        * Add necessary state for this enhancement.
         */
+       u32 pending_send_sz;
+
+       u32 reserved1[12];
+
+       union {
+               struct {
+                       u32 feat_pending_send_sz:1;
+               };
+               u32 value;
+       } feature_bits;
+
+       /* Pad it to PAGE_SIZE so that data starts on page boundary */
+       u8      reserved2[4028];
 
        /*
         * Ring data starts here + RingDataStartOffset
@@ -405,12 +419,22 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
  */
 #define HV_DRV_VERSION           "3.1"
 
-
 /*
- * A revision number of vmbus that is used for ensuring both ends on a
- * partition are using compatible versions.
+ * VMBUS version is 32 bit entity broken up into
+ * two 16 bit quantities: major_number. minor_number.
+ *
+ * 0 . 13 (Windows Server 2008)
+ * 1 . 1  (Windows 7)
+ * 2 . 4  (Windows 8)
  */
-#define VMBUS_REVISION_NUMBER          13
+
+#define VERSION_WS2008  ((0 << 16) | (13))
+#define VERSION_WIN7    ((1 << 16) | (1))
+#define VERSION_WIN8    ((2 << 16) | (4))
+
+#define VERSION_INVAL -1
+
+#define VERSION_CURRENT VERSION_WIN8
 
 /* Make maximum size of pipe payload of 16K */
 #define MAX_PIPE_DATA_PAYLOAD          (sizeof(u8) * 16384)
@@ -432,9 +456,13 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
 struct vmbus_channel_offer {
        uuid_le if_type;
        uuid_le if_instance;
-       u64 int_latency; /* in 100ns units */
-       u32 if_revision;
-       u32 server_ctx_size;    /* in bytes */
+
+       /*
+        * These two fields are not currently used.
+        */
+       u64 reserved1;
+       u64 reserved2;
+
        u16 chn_flags;
        u16 mmio_megabytes;             /* in bytes * 1024 * 1024 */
 
@@ -456,7 +484,11 @@ struct vmbus_channel_offer {
                        unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
                } pipe;
        } u;
-       u32 padding;
+       /*
+        * The sub_channel_index is defined in win8.
+        */
+       u16 sub_channel_index;
+       u16 reserved3;
 } __packed;
 
 /* Server Flags */
@@ -652,7 +684,25 @@ struct vmbus_channel_offer_channel {
        struct vmbus_channel_offer offer;
        u32 child_relid;
        u8 monitorid;
-       u8 monitor_allocated;
+       /*
+        * win7 and beyond splits this field into a bit field.
+        */
+       u8 monitor_allocated:1;
+       u8 reserved:7;
+       /*
+        * These are new fields added in win7 and later.
+        * Do not access these fields without checking the
+        * negotiated protocol.
+        *
+        * If "is_dedicated_interrupt" is set, we must not set the
+        * associated bit in the channel bitmap while sending the
+        * interrupt to the host.
+        *
+        * connection_id is to be used in signaling the host.
+        */
+       u16 is_dedicated_interrupt:1;
+       u16 reserved1:15;
+       u32 connection_id;
 } __packed;
 
 /* Rescind Offer parameters */
@@ -683,8 +733,15 @@ struct vmbus_channel_open_channel {
        /* GPADL for the channel's ring buffer. */
        u32 ringbuffer_gpadlhandle;
 
-       /* GPADL for the channel's server context save area. */
-       u32 server_contextarea_gpadlhandle;
+       /*
+        * Starting with win8, this field will be used to specify
+        * the target virtual processor on which to deliver the interrupt for
+        * the host to guest communication.
+        * Prior to win8, incoming channel interrupts would only
+        * be delivered on cpu 0. Setting this value to 0 would
+        * preserve the earlier behavior.
+        */
+       u32 target_vp;
 
        /*
        * The upstream ring buffer begins at offset zero in the memory
@@ -848,6 +905,27 @@ struct vmbus_close_msg {
        struct vmbus_channel_close_channel msg;
 };
 
+/* Define connection identifier type. */
+union hv_connection_id {
+       u32 asu32;
+       struct {
+               u32 id:24;
+               u32 reserved:8;
+       } u;
+};
+
+/* Definition of the hv_signal_event hypercall input structure. */
+struct hv_input_signal_event {
+       union hv_connection_id connectionid;
+       u16 flag_number;
+       u16 rsvdz;
+};
+
+struct hv_input_signal_event_buffer {
+       u64 align8;
+       struct hv_input_signal_event event;
+};
+
 struct vmbus_channel {
        struct list_head listentry;
 
@@ -882,8 +960,42 @@ struct vmbus_channel {
 
        void (*onchannel_callback)(void *context);
        void *channel_callback_context;
+
+       /*
+        * A channel can be marked for efficient (batched)
+        * reading:
+        * If batched_reading is set to "true", we read until the
+        * channel is empty and hold off interrupts from the host
+        * during the entire read process.
+        * If batched_reading is set to "false", the client is not
+        * going to perform batched reading.
+        *
+        * By default we will enable batched reading; specific
+        * drivers that don't want this behavior can turn it off.
+        */
+
+       bool batched_reading;
+
+       bool is_dedicated_interrupt;
+       struct hv_input_signal_event_buffer sig_buf;
+       struct hv_input_signal_event *sig_event;
+
+       /*
+        * Starting with win8, this field will be used to specify
+        * the target virtual processor on which to deliver the interrupt for
+        * the host to guest communication.
+        * Prior to win8, incoming channel interrupts would only
+        * be delivered on cpu 0. Setting this value to 0 would
+        * preserve the earlier behavior.
+        */
+       u32 target_vp;
 };
 
+static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+{
+       c->batched_reading = state;
+}
+
 void vmbus_onmessage(void *context);
 
 int vmbus_request_offers(void);
@@ -1047,6 +1159,100 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
                  g8, g9, ga, gb, gc, gd, ge, gf },
 
 /*
+ * GUID definitions of various offer types - services offered to the guest.
+ */
+
+/*
+ * Network GUID
+ * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
+ */
+#define HV_NIC_GUID \
+       .guid = { \
+                       0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
+                       0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
+               }
+
+/*
+ * IDE GUID
+ * {32412632-86cb-44a2-9b5c-50d1417354f5}
+ */
+#define HV_IDE_GUID \
+       .guid = { \
+                       0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
+                       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
+               }
+
+/*
+ * SCSI GUID
+ * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
+ */
+#define HV_SCSI_GUID \
+       .guid = { \
+                       0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
+                       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
+               }
+
+/*
+ * Shutdown GUID
+ * {0e0b6031-5213-4934-818b-38d90ced39db}
+ */
+#define HV_SHUTDOWN_GUID \
+       .guid = { \
+                       0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
+                       0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
+               }
+
+/*
+ * Time Synch GUID
+ * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
+ */
+#define HV_TS_GUID \
+       .guid = { \
+                       0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
+                       0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
+               }
+
+/*
+ * Heartbeat GUID
+ * {57164f39-9115-4e78-ab55-382f3bd5422d}
+ */
+#define HV_HEART_BEAT_GUID \
+       .guid = { \
+                       0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
+                       0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
+               }
+
+/*
+ * KVP GUID
+ * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
+ */
+#define HV_KVP_GUID \
+       .guid = { \
+                       0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
+                       0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3,  0xe6 \
+               }
+
+/*
+ * Dynamic memory GUID
+ * {525074dc-8985-46e2-8057-a307dc18a502}
+ */
+#define HV_DM_GUID \
+       .guid = { \
+                       0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \
+                       0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
+               }
+
+/*
+ * Mouse GUID
+ * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
+ */
+#define HV_MOUSE_GUID \
+       .guid = { \
+                       0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
+                       0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
+               }
+
+/*
  * Common header for Hyper-V ICs
  */
 
@@ -1150,5 +1356,11 @@ int hv_kvp_init(struct hv_util_service *);
 void hv_kvp_deinit(void);
 void hv_kvp_onchannelcallback(void *);
 
+/*
+ * Negotiated version with the Host.
+ */
+
+extern __u32 vmbus_proto_version;
+
 #endif /* __KERNEL__ */
 #endif /* _HYPERV_H */
index a799273..10ed4f4 100644 (file)
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
 
-/* Used for HOTPLUG, but that is always enabled now, so just make them noops */
-#define __devinit
-#define __devinitdata
-#define __devinitconst
-#define __devexit
-#define __devexitdata
-#define __devexitconst
-
 /* Used for HOTPLUG_CPU */
 #define __cpuinit        __section(.cpuinit.text) __cold notrace
 #define __cpuinitdata    __section(.cpuinit.data)
@@ -337,18 +329,6 @@ void __init parse_early_options(char *cmdline);
 #define __INITRODATA_OR_MODULE __INITRODATA
 #endif /*CONFIG_MODULES*/
 
-/* Functions marked as __devexit may be discarded at kernel link time, depending
-   on config options.  Newer versions of binutils detect references from
-   retained sections to discarded sections and flag an error.  Pointers to
-   __devexit functions must use __devexit_p(function_name), the wrapper will
-   insert either the function_name or NULL, depending on the config options.
- */
-#if defined(MODULE) || defined(CONFIG_HOTPLUG)
-#define __devexit_p(x) x
-#else
-#define __devexit_p(x) NULL
-#endif
-
 #ifdef MODULE
 #define __exit_p(x) x
 #else
index 5e4e617..5fa5afe 100644 (file)
@@ -268,11 +268,6 @@ struct irq_affinity_notify {
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-static inline void irq_run_affinity_notifiers(void)
-{
-       flush_scheduled_work();
-}
-
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
index 83ba0ab..649e5f8 100644 (file)
@@ -652,8 +652,8 @@ struct ata_device {
                u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
        };
 
-       /* Identify Device Data Log (30h), SATA Settings (page 08h) */
-       u8                      sata_settings[ATA_SECT_SIZE];
+       /* DEVSLP Timing Variables from Identify Device Data Log */
+       u8                      devslp_timing[ATA_LOG_DEVSLP_SIZE];
 
        /* error history */
        int                     spdn_cnt;
index 00e4637..2bca44b 100644 (file)
@@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i)    lock_acquire(l, s, t, 0, 2, n, i)
 #  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
 #  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i)    lock_acquire(l, s, t, 0, 1, n, i)
 #  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)                        lock_release(l, n, i)
 #else
 # define rwsem_acquire(l, s, t, i)             do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i)     do { } while (0)
 # define rwsem_acquire_read(l, s, t, i)                do { } while (0)
 # define rwsem_release(l, n, i)                        do { } while (0)
 #endif
index 6320407..66e2f7c 100644 (file)
@@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
 int split_free_page(struct page *page);
-int capture_free_page(struct page *page, int alloc_order, int migratetype);
 
 /*
  * Compound pages have a destructor function.  Provide a
index 7760c6d..1375ee3 100644 (file)
@@ -199,11 +199,11 @@ struct module_use {
        struct module *source, *target;
 };
 
-enum module_state
-{
-       MODULE_STATE_LIVE,
-       MODULE_STATE_COMING,
-       MODULE_STATE_GOING,
+enum module_state {
+       MODULE_STATE_LIVE,      /* Normal state. */
+       MODULE_STATE_COMING,    /* Full formed, running module_init. */
+       MODULE_STATE_GOING,     /* Going away. */
+       MODULE_STATE_UNFORMED,  /* Still setting it up. */
 };
 
 /**
index c599e47..9ef07d0 100644 (file)
@@ -60,6 +60,9 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )
 
+extern void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                          const struct ethtool_ops *ops);
+
 /* hardware address assignment types */
 #define NET_ADDR_PERM          0       /* address is permanent (default) */
 #define NET_ADDR_RANDOM                1       /* address is generated randomly */
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
new file mode 100644 (file)
index 0000000..f6a1520
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+struct ntb_transport_qp;
+
+struct ntb_client {
+       struct device_driver driver;
+       int (*probe) (struct pci_dev *pdev);
+       void (*remove) (struct pci_dev *pdev);
+};
+
+int ntb_register_client(struct ntb_client *drvr);
+void ntb_unregister_client(struct ntb_client *drvr);
+int ntb_register_client_dev(char *device_name);
+void ntb_unregister_client_dev(char *device_name);
+
+struct ntb_queue_handlers {
+       void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
+       void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
+       void (*event_handler) (void *data, int status);
+};
+
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+                          const struct ntb_queue_handlers *handlers);
+void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+                            unsigned int len);
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+                            unsigned int len);
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
+void ntb_transport_link_up(struct ntb_transport_qp *qp);
+void ntb_transport_link_down(struct ntb_transport_qp *qp);
+bool ntb_transport_link_query(struct ntb_transport_qp *qp);
index 1693775..89573a3 100644 (file)
@@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request,
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
 extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
 extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
index 2ac60c9..fea49b5 100644 (file)
@@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
 
-static __always_inline void
-rb_erase_augmented(struct rb_node *node, struct rb_root *root,
-                  const struct rb_augment_callbacks *augment)
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                    const struct rb_augment_callbacks *augment)
 {
        struct rb_node *child = node->rb_right, *tmp = node->rb_left;
        struct rb_node *parent, *rebalance;
@@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
        }
 
        augment->propagate(tmp, NULL);
+       return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                  const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
        if (rebalance)
                __rb_erase_color(rebalance, root, augment->rotate);
 }
index 54bd7cd..8da67d6 100644 (file)
@@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem);
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+
+# define down_write_nest_lock(sem, nest_lock)                  \
+do {                                                           \
+       typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+       _down_write_nest_lock(sem, &(nest_lock)->dep_map);      \
+} while (0);
+
 #else
 # define down_read_nested(sem, subclass)               down_read(sem)
+# define down_write_nest_lock(sem, nest_lock)  down_write(sem)
 # define down_write_nested(sem, subclass)      down_write(sem)
 #endif
 
index 206bb08..d211247 100644 (file)
@@ -1810,6 +1810,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_MEMALLOC    0x00000800      /* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000   /* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH   0x00002000      /* if unset the fpu must be initialized before use */
+#define PF_USED_ASYNC  0x00004000      /* used async_schedule*(), used by module init */
 #define PF_NOFREEZE    0x00008000      /* this thread should not be frozen */
 #define PF_FROZEN      0x00010000      /* frozen for system suspend */
 #define PF_FSTRANS     0x00020000      /* inside a filesystem transaction */
@@ -2713,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
 extern void recalc_sigpending_and_wake(struct task_struct *t);
 extern void recalc_sigpending(void);
 
-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
+
+static inline void signal_wake_up(struct task_struct *t, bool resume)
+{
+       signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
+{
+       signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+}
 
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
new file mode 100644 (file)
index 0000000..023430e
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMW_VMCI_API_H__
+#define __VMW_VMCI_API_H__
+
+#include <linux/uidgid.h>
+#include <linux/vmw_vmci_defs.h>
+
+#undef  VMCI_KERNEL_API_VERSION
+#define VMCI_KERNEL_API_VERSION_1 1
+#define VMCI_KERNEL_API_VERSION_2 2
+#define VMCI_KERNEL_API_VERSION   VMCI_KERNEL_API_VERSION_2
+
+typedef void (vmci_device_shutdown_fn) (void *device_registration,
+                                       void *user_data);
+
+int vmci_datagram_create_handle(u32 resource_id, u32 flags,
+                               vmci_datagram_recv_cb recv_cb,
+                               void *client_data,
+                               struct vmci_handle *out_handle);
+int vmci_datagram_create_handle_priv(u32 resource_id, u32 flags, u32 priv_flags,
+                                    vmci_datagram_recv_cb recv_cb,
+                                    void *client_data,
+                                    struct vmci_handle *out_handle);
+int vmci_datagram_destroy_handle(struct vmci_handle handle);
+int vmci_datagram_send(struct vmci_datagram *msg);
+int vmci_doorbell_create(struct vmci_handle *handle, u32 flags,
+                        u32 priv_flags,
+                        vmci_callback notify_cb, void *client_data);
+int vmci_doorbell_destroy(struct vmci_handle handle);
+int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
+u32 vmci_get_context_id(void);
+bool vmci_is_context_owner(u32 context_id, kuid_t uid);
+
+int vmci_event_subscribe(u32 event,
+                        vmci_event_cb callback, void *callback_data,
+                        u32 *subid);
+int vmci_event_unsubscribe(u32 subid);
+u32 vmci_context_get_priv_flags(u32 context_id);
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+                    struct vmci_handle *handle,
+                    u64 produce_qsize,
+                    u64 consume_qsize,
+                    u32 peer, u32 flags, u32 priv_flags);
+int vmci_qpair_detach(struct vmci_qp **qpair);
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+                                  u64 *producer_tail,
+                                  u64 *consumer_head);
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+                                  u64 *consumer_tail,
+                                  u64 *producer_head);
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair);
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair);
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair);
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair);
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+                          const void *buf, size_t buf_size, int mode);
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+                          void *buf, size_t buf_size, int mode);
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
+                       int mode);
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+                         void *iov, size_t iov_size, int mode);
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+                         void *iov, size_t iov_size, int mode);
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size,
+                        int mode);
+
+#endif /* !__VMW_VMCI_API_H__ */
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
new file mode 100644 (file)
index 0000000..65ac54c
--- /dev/null
@@ -0,0 +1,880 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMW_VMCI_DEF_H_
+#define _VMW_VMCI_DEF_H_
+
+#include <linux/atomic.h>
+
+/* Register offsets. */
+#define VMCI_STATUS_ADDR      0x00
+#define VMCI_CONTROL_ADDR     0x04
+#define VMCI_ICR_ADDR        0x08
+#define VMCI_IMR_ADDR         0x0c
+#define VMCI_DATA_OUT_ADDR    0x10
+#define VMCI_DATA_IN_ADDR     0x14
+#define VMCI_CAPS_ADDR        0x18
+#define VMCI_RESULT_LOW_ADDR  0x1c
+#define VMCI_RESULT_HIGH_ADDR 0x20
+
+/* Max number of devices. */
+#define VMCI_MAX_DEVICES 1
+
+/* Status register bits. */
+#define VMCI_STATUS_INT_ON     0x1
+
+/* Control register bits. */
+#define VMCI_CONTROL_RESET        0x1
+#define VMCI_CONTROL_INT_ENABLE   0x2
+#define VMCI_CONTROL_INT_DISABLE  0x4
+
+/* Capabilities register bits. */
+#define VMCI_CAPS_HYPERCALL     0x1
+#define VMCI_CAPS_GUESTCALL     0x2
+#define VMCI_CAPS_DATAGRAM      0x4
+#define VMCI_CAPS_NOTIFICATIONS 0x8
+
+/* Interrupt Cause register bits. */
+#define VMCI_ICR_DATAGRAM      0x1
+#define VMCI_ICR_NOTIFICATION  0x2
+
+/* Interrupt Mask register bits. */
+#define VMCI_IMR_DATAGRAM      0x1
+#define VMCI_IMR_NOTIFICATION  0x2
+
+/* Interrupt type. */
+enum {
+       VMCI_INTR_TYPE_INTX = 0,
+       VMCI_INTR_TYPE_MSI = 1,
+       VMCI_INTR_TYPE_MSIX = 2,
+};
+
+/* Maximum MSI/MSI-X interrupt vectors in the device. */
+#define VMCI_MAX_INTRS 2
+
+/*
+ * Supported interrupt vectors.  There is one for each ICR value above,
+ * but here they indicate the position in the vector array/message ID.
+ */
+enum {
+       VMCI_INTR_DATAGRAM = 0,
+       VMCI_INTR_NOTIFICATION = 1,
+};
+
+/*
+ * A single VMCI device has an upper limit of 128MB on the amount of
+ * memory that can be used for queue pairs.
+ */
+#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+
+/*
+ * Queues with pre-mapped data pages must be small, so that we don't pin
+ * too much kernel memory (especially on vmkernel).  We limit a queuepair to
+ * 32 KB, or 16 KB per queue for symmetrical pairs.
+ */
+#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+
+/*
+ * We have a fixed set of resource IDs available in the VMX.
+ * This allows us to have a very simple implementation since we statically
+ * know how many will create datagram handles. If a new caller arrives and
+ * we have run out of slots we can manually increment the maximum size of
+ * available resource IDs.
+ *
+ * VMCI reserved hypervisor datagram resource IDs.
+ */
+enum {
+       VMCI_RESOURCES_QUERY = 0,
+       VMCI_GET_CONTEXT_ID = 1,
+       VMCI_SET_NOTIFY_BITMAP = 2,
+       VMCI_DOORBELL_LINK = 3,
+       VMCI_DOORBELL_UNLINK = 4,
+       VMCI_DOORBELL_NOTIFY = 5,
+       /*
+        * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
+        * obsoleted by the removal of VM to VM communication.
+        */
+       VMCI_DATAGRAM_REQUEST_MAP = 6,
+       VMCI_DATAGRAM_REMOVE_MAP = 7,
+       VMCI_EVENT_SUBSCRIBE = 8,
+       VMCI_EVENT_UNSUBSCRIBE = 9,
+       VMCI_QUEUEPAIR_ALLOC = 10,
+       VMCI_QUEUEPAIR_DETACH = 11,
+
+       /*
+        * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
+        * WS 7.0/7.1 and ESX 4.1
+        */
+       VMCI_HGFS_TRANSPORT = 13,
+       VMCI_UNITY_PBRPC_REGISTER = 14,
+       VMCI_RPC_PRIVILEGED = 15,
+       VMCI_RPC_UNPRIVILEGED = 16,
+       VMCI_RESOURCE_MAX = 17,
+};
+
+/*
+ * struct vmci_handle - Ownership information structure
+ * @context:    The VMX context ID.
+ * @resource:   The resource ID (used for locating in resource hash).
+ *
+ * The vmci_handle structure is used to track resources used within
+ * vmw_vmci.
+ */
+struct vmci_handle {
+       u32 context;
+       u32 resource;
+};
+
+#define vmci_make_handle(_cid, _rid) \
+       (struct vmci_handle){ .context = _cid, .resource = _rid }
+
+static inline bool vmci_handle_is_equal(struct vmci_handle h1,
+                                       struct vmci_handle h2)
+{
+       return h1.context == h2.context && h1.resource == h2.resource;
+}
+
+#define VMCI_INVALID_ID ~0
+static const struct vmci_handle VMCI_INVALID_HANDLE = {
+       .context = VMCI_INVALID_ID,
+       .resource = VMCI_INVALID_ID
+};
+
+static inline bool vmci_handle_is_invalid(struct vmci_handle h)
+{
+       return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
+}
+
+/*
+ * The below defines can be used to send anonymous requests.
+ * This also indicates that no response is expected.
+ */
+#define VMCI_ANON_SRC_CONTEXT_ID   VMCI_INVALID_ID
+#define VMCI_ANON_SRC_RESOURCE_ID  VMCI_INVALID_ID
+static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
+       .context = VMCI_ANON_SRC_CONTEXT_ID,
+       .resource = VMCI_ANON_SRC_RESOURCE_ID
+};
+
+/* The lowest 16 context ids are reserved for internal use. */
+#define VMCI_RESERVED_CID_LIMIT ((u32) 16)
+
+/*
+ * Hypervisor context id, used for calling into hypervisor
+ * supplied services from the VM.
+ */
+#define VMCI_HYPERVISOR_CONTEXT_ID 0
+
+/*
+ * Well-known context id, a logical context that contains a set of
+ * well-known services. This context ID is now obsolete.
+ */
+#define VMCI_WELL_KNOWN_CONTEXT_ID 1
+
+/*
+ * Context ID used by host endpoints.
+ */
+#define VMCI_HOST_CONTEXT_ID  2
+
+#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) &&         \
+                                 (_cid) > VMCI_HOST_CONTEXT_ID)
+
+/*
+ * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
+ * handles that refer to a specific context.
+ */
+#define VMCI_CONTEXT_RESOURCE_ID 0
+
+/*
+ * VMCI error codes.
+ */
+enum {
+       VMCI_SUCCESS_QUEUEPAIR_ATTACH   = 5,
+       VMCI_SUCCESS_QUEUEPAIR_CREATE   = 4,
+       VMCI_SUCCESS_LAST_DETACH        = 3,
+       VMCI_SUCCESS_ACCESS_GRANTED     = 2,
+       VMCI_SUCCESS_ENTRY_DEAD         = 1,
+       VMCI_SUCCESS                     = 0,
+       VMCI_ERROR_INVALID_RESOURCE      = (-1),
+       VMCI_ERROR_INVALID_ARGS          = (-2),
+       VMCI_ERROR_NO_MEM                = (-3),
+       VMCI_ERROR_DATAGRAM_FAILED       = (-4),
+       VMCI_ERROR_MORE_DATA             = (-5),
+       VMCI_ERROR_NO_MORE_DATAGRAMS     = (-6),
+       VMCI_ERROR_NO_ACCESS             = (-7),
+       VMCI_ERROR_NO_HANDLE             = (-8),
+       VMCI_ERROR_DUPLICATE_ENTRY       = (-9),
+       VMCI_ERROR_DST_UNREACHABLE       = (-10),
+       VMCI_ERROR_PAYLOAD_TOO_LARGE     = (-11),
+       VMCI_ERROR_INVALID_PRIV          = (-12),
+       VMCI_ERROR_GENERIC               = (-13),
+       VMCI_ERROR_PAGE_ALREADY_SHARED   = (-14),
+       VMCI_ERROR_CANNOT_SHARE_PAGE     = (-15),
+       VMCI_ERROR_CANNOT_UNSHARE_PAGE   = (-16),
+       VMCI_ERROR_NO_PROCESS            = (-17),
+       VMCI_ERROR_NO_DATAGRAM           = (-18),
+       VMCI_ERROR_NO_RESOURCES          = (-19),
+       VMCI_ERROR_UNAVAILABLE           = (-20),
+       VMCI_ERROR_NOT_FOUND             = (-21),
+       VMCI_ERROR_ALREADY_EXISTS        = (-22),
+       VMCI_ERROR_NOT_PAGE_ALIGNED      = (-23),
+       VMCI_ERROR_INVALID_SIZE          = (-24),
+       VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
+       VMCI_ERROR_TIMEOUT               = (-26),
+       VMCI_ERROR_DATAGRAM_INCOMPLETE   = (-27),
+       VMCI_ERROR_INCORRECT_IRQL        = (-28),
+       VMCI_ERROR_EVENT_UNKNOWN         = (-29),
+       VMCI_ERROR_OBSOLETE              = (-30),
+       VMCI_ERROR_QUEUEPAIR_MISMATCH    = (-31),
+       VMCI_ERROR_QUEUEPAIR_NOTSET      = (-32),
+       VMCI_ERROR_QUEUEPAIR_NOTOWNER    = (-33),
+       VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
+       VMCI_ERROR_QUEUEPAIR_NOSPACE     = (-35),
+       VMCI_ERROR_QUEUEPAIR_NODATA      = (-36),
+       VMCI_ERROR_BUSMEM_INVALIDATION   = (-37),
+       VMCI_ERROR_MODULE_NOT_LOADED     = (-38),
+       VMCI_ERROR_DEVICE_NOT_FOUND      = (-39),
+       VMCI_ERROR_QUEUEPAIR_NOT_READY   = (-40),
+       VMCI_ERROR_WOULD_BLOCK           = (-41),
+
+       /* VMCI clients should return error code within this range */
+       VMCI_ERROR_CLIENT_MIN            = (-500),
+       VMCI_ERROR_CLIENT_MAX            = (-550),
+
+       /* Internal error codes. */
+       VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
+};
+
+/* VMCI reserved events. */
+enum {
+       /* Only applicable to guest endpoints */
+       VMCI_EVENT_CTX_ID_UPDATE  = 0,
+
+       /* Applicable to guest and host */
+       VMCI_EVENT_CTX_REMOVED    = 1,
+
+       /* Only applicable to guest endpoints */
+       VMCI_EVENT_QP_RESUMED     = 2,
+
+       /* Applicable to guest and host */
+       VMCI_EVENT_QP_PEER_ATTACH = 3,
+
+       /* Applicable to guest and host */
+       VMCI_EVENT_QP_PEER_DETACH = 4,
+
+       /*
+        * Applicable to VMX and vmk.  On vmk,
+        * this event has the Context payload type.
+        */
+       VMCI_EVENT_MEM_ACCESS_ON  = 5,
+
+       /*
+        * Applicable to VMX and vmk.  Same as
+        * above for the payload type.
+        */
+       VMCI_EVENT_MEM_ACCESS_OFF = 6,
+       VMCI_EVENT_MAX            = 7,
+};
+
+/*
+ * Of the above events, a few are reserved for use in the VMX, and
+ * other endpoints (guest and host kernel) should not use them. For
+ * the rest of the events, we allow both host and guest endpoints to
+ * subscribe to them, to maintain the same API for host and guest
+ * endpoints.
+ */
+#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
+                                     (_event) == VMCI_EVENT_MEM_ACCESS_OFF)
+
+#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX &&         \
+                                 !VMCI_EVENT_VALID_VMX(_event))
+
+/* Reserved guest datagram resource ids. */
+#define VMCI_EVENT_HANDLER 0
+
+/*
+ * VMCI coarse-grained privileges (per context or host
+ * process/endpoint. An entity with the restricted flag is only
+ * allowed to interact with the hypervisor and trusted entities.
+ */
+enum {
+       VMCI_NO_PRIVILEGE_FLAGS = 0,
+       VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
+       VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
+       VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
+                                   VMCI_PRIVILEGE_FLAG_TRUSTED),
+       VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
+       VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
+       VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
+};
+
+/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
+#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
+
+/*
+ * Driver version.
+ *
+ * Increment major version when you make an incompatible change.
+ * Compatibility goes both ways (old driver with new executable
+ * as well as new driver with old executable).
+ */
+
+/* Never change VMCI_VERSION_SHIFT_WIDTH */
+#define VMCI_VERSION_SHIFT_WIDTH 16
+#define VMCI_MAKE_VERSION(_major, _minor)                      \
+       ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))
+
+#define VMCI_VERSION_MAJOR(v)  ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
+#define VMCI_VERSION_MINOR(v)  ((u16) (v))
+
+/*
+ * VMCI_VERSION is always the current version.  Subsequently listed
+ * versions are ways of detecting previous versions of the connecting
+ * application (i.e., VMX).
+ *
+ * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
+ * communication.
+ *
+ * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
+ * support.
+ *
+ * VMCI_VERSION_HOSTQP: This version introduced host end point support
+ * for hosted products.
+ *
+ * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
+ * support for host end-points.
+ *
+ * VMCI_VERSION_PREVERS2: This fictional version number is intended to
+ * represent the version of a VMX which doesn't call into the driver
+ * with ioctl VERSION2 and thus doesn't establish its version with the
+ * driver.
+ */
+
+#define VMCI_VERSION                VMCI_VERSION_NOVMVM
+#define VMCI_VERSION_NOVMVM         VMCI_MAKE_VERSION(11, 0)
+#define VMCI_VERSION_NOTIFY         VMCI_MAKE_VERSION(10, 0)
+#define VMCI_VERSION_HOSTQP         VMCI_MAKE_VERSION(9, 0)
+#define VMCI_VERSION_PREHOSTQP      VMCI_MAKE_VERSION(8, 0)
+#define VMCI_VERSION_PREVERS2       VMCI_MAKE_VERSION(1, 0)
+
+#define VMCI_SOCKETS_MAKE_VERSION(_p)                                  \
+       ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
+
+/*
+ * The VMCI IOCTLs.  We use identity code 7, as noted in ioctl-number.h, and
+ * we start at sequence 9f.  This gives us the same values that our shipping
+ * products use, starting at 1951, provided we leave out the direction and
+ * structure size.  Note that VMMon occupies the block following us, starting
+ * at 2001.
+ */
+#define IOCTL_VMCI_VERSION                     _IO(7, 0x9f)    /* 1951 */
+#define IOCTL_VMCI_INIT_CONTEXT                        _IO(7, 0xa0)
+#define IOCTL_VMCI_QUEUEPAIR_SETVA             _IO(7, 0xa4)
+#define IOCTL_VMCI_NOTIFY_RESOURCE             _IO(7, 0xa5)
+#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE       _IO(7, 0xa6)
+#define IOCTL_VMCI_VERSION2                    _IO(7, 0xa7)
+#define IOCTL_VMCI_QUEUEPAIR_ALLOC             _IO(7, 0xa8)
+#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE       _IO(7, 0xa9)
+#define IOCTL_VMCI_QUEUEPAIR_DETACH            _IO(7, 0xaa)
+#define IOCTL_VMCI_DATAGRAM_SEND               _IO(7, 0xab)
+#define IOCTL_VMCI_DATAGRAM_RECEIVE            _IO(7, 0xac)
+#define IOCTL_VMCI_CTX_ADD_NOTIFICATION                _IO(7, 0xaf)
+#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION     _IO(7, 0xb0)
+#define IOCTL_VMCI_CTX_GET_CPT_STATE           _IO(7, 0xb1)
+#define IOCTL_VMCI_CTX_SET_CPT_STATE           _IO(7, 0xb2)
+#define IOCTL_VMCI_GET_CONTEXT_ID              _IO(7, 0xb3)
+#define IOCTL_VMCI_SOCKETS_VERSION             _IO(7, 0xb4)
+#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE                _IO(7, 0xb8)
+#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID       _IO(7, 0xb9)
+#define IOCTL_VMCI_SET_NOTIFY                  _IO(7, 0xcb)    /* 1995 */
+/*IOCTL_VMMON_START                            _IO(7, 0xd1)*/  /* 2001 */
+
+/*
+ * struct vmci_queue_header - VMCI Queue Header information.
+ *
+ * A Queue cannot stand by itself as designed.  Each Queue's header
+ * contains a pointer into itself (the producer_tail) and into its peer
+ * (consumer_head).  The reason for the separation is one of
+ * accessibility: Each end-point can modify two things: where the next
+ * location to enqueue is within its produce_q (producer_tail); and
+ * where the next dequeue location is in its consume_q (consumer_head).
+ *
+ * An end-point cannot modify the pointers of its peer (guest to
+ * guest; NOTE that in the host both queue headers are mapped r/w).
+ * But, each end-point needs read access to both Queue header
+ * structures in order to determine how much space is used (or left)
+ * in the Queue.  This is because for an end-point to know how full
+ * its produce_q is, it needs to use the consumer_head that points into
+ * the produce_q but -that- consumer_head is in the Queue header for
+ * that end-points consume_q.
+ *
+ * Thoroughly confused?  Sorry.
+ *
+ * producer_tail: the point to enqueue new entrants.  When you approach
+ * a line in a store, for example, you walk up to the tail.
+ *
+ * consumer_head: the point in the queue from which the next element is
+ * dequeued.  In other words, who is next in line is he who is at the
+ * head of the line.
+ *
+ * Also, producer_tail points to an empty byte in the Queue, whereas
+ * consumer_head points to a valid byte of data (unless producer_tail ==
+ * consumer_head in which case consumer_head does not point to a valid
+ * byte of data).
+ *
+ * For a queue of buffer 'size' bytes, the tail and head pointers will be in
+ * the range [0, size-1].
+ *
+ * If produce_q_header->producer_tail == consume_q_header->consumer_head
+ * then the produce_q is empty.
+ */
+struct vmci_queue_header {
+       /* All fields are 64bit and aligned. */
+       struct vmci_handle handle;      /* Identifier. */
+       atomic64_t producer_tail;       /* Offset in this queue. */
+       atomic64_t consumer_head;       /* Offset in peer queue. */
+};
+
+/*
+ * struct vmci_datagram - Base struct for vmci datagrams.
+ * @dst:        A vmci_handle that tracks the destination of the datagram.
+ * @src:        A vmci_handle that tracks the source of the datagram.
+ * @payload_size:       The size of the payload.
+ *
+ * vmci_datagram structs are used when sending vmci datagrams.  They include
+ * the necessary source and destination information to properly route
+ * the information along with the size of the package.
+ */
+struct vmci_datagram {
+       struct vmci_handle dst;
+       struct vmci_handle src;
+       u64 payload_size;
+};
+
+/*
+ * Second flag is for creating a well-known handle instead of a per context
+ * handle.  Next flag is for deferring datagram delivery, so that the
+ * datagram callback is invoked in a delayed context (not interrupt context).
+ */
+#define VMCI_FLAG_DG_NONE          0
+#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
+#define VMCI_FLAG_ANYCID_DG_HND    0x2
+#define VMCI_FLAG_DG_DELAYED_CB    0x4
+
+/*
+ * Maximum supported size of a VMCI datagram for routable datagrams.
+ * Datagrams going to the hypervisor are allowed to be larger.
+ */
+#define VMCI_MAX_DG_SIZE (17 * 4096)
+#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
+                                 sizeof(struct vmci_datagram))
+#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) +                  \
+                                     sizeof(struct vmci_datagram))
+#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
+#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
+#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
+#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
+
+struct vmci_event_payload_qp {
+       struct vmci_handle handle;  /* queue_pair handle. */
+       u32 peer_id;                /* Context id of attaching/detaching VM. */
+       u32 _pad;
+};
+
+/* Flags for VMCI queue_pair API. */
+enum {
+       /* Fail alloc if QP not created by peer. */
+       VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,
+
+       /* Only allow attaches from local context. */
+       VMCI_QPFLAG_LOCAL = 1 << 1,
+
+       /* Host won't block when guest is quiesced. */
+       VMCI_QPFLAG_NONBLOCK = 1 << 2,
+
+       /* Pin data pages in ESX.  Used with NONBLOCK */
+       VMCI_QPFLAG_PINNED = 1 << 3,
+
+       /* Update the following flag when adding new flags. */
+       VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
+                            VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
+
+       /* Convenience flags */
+       VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
+       VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
+};
+
+/*
+ * We allow at least 1024 more event datagrams from the hypervisor past the
+ * normally allowed datagrams pending for a given context.  We define this
+ * limit on event datagrams from the hypervisor to guard against DoS attack
+ * from a malicious VM which could repeatedly attach to and detach from a queue
+ * pair, causing events to be queued at the destination VM.  However, the rate
+ * at which such events can be generated is small since it requires a VM exit
+ * and handling of queue pair attach/detach call at the hypervisor.  Event
+ * datagrams may be queued up at the destination VM if it has interrupts
+ * disabled or if it is not draining events for some other reason.  1024
+ * datagrams is a grossly conservative estimate of the time for which
+ * interrupts may be disabled in the destination VM, but at the same time does
+ * not exacerbate the memory pressure problem on the host by much (size of each
+ * event datagram is small).
+ */
+#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE                         \
+       (VMCI_MAX_DATAGRAM_QUEUE_SIZE +                                 \
+        1024 * (sizeof(struct vmci_datagram) +                         \
+                sizeof(struct vmci_event_data_max)))
+
+/*
+ * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
+ * hypervisor resources.  Struct size is 16 bytes. All fields in struct are
+ * aligned to their natural alignment.
+ */
+struct vmci_resource_query_hdr {
+       struct vmci_datagram hdr;
+       u32 num_resources;
+       u32 _padding;
+};
+
+/*
+ * Convenience struct for negotiating vectors. Must match layout of
+ * VMCIResourceQueryHdr minus the struct vmci_datagram header.
+ */
+struct vmci_resource_query_msg {
+       u32 num_resources;
+       u32 _padding;
+       u32 resources[1];
+};
+
+/*
+ * The maximum number of resources that can be queried using
+ * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
+ * bits of a positive return value. Negative values are reserved for
+ * errors.
+ */
+#define VMCI_RESOURCE_QUERY_MAX_NUM 31
+
+/* Maximum size for the VMCI_RESOURCE_QUERY request. */
+#define VMCI_RESOURCE_QUERY_MAX_SIZE                           \
+       (sizeof(struct vmci_resource_query_hdr) +               \
+        sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
+
+/*
+ * Struct used for setting the notification bitmap.  All fields in
+ * struct are aligned to their natural alignment.
+ */
+struct vmci_notify_bm_set_msg {
+       struct vmci_datagram hdr;
+       u32 bitmap_ppn;
+       u32 _pad;
+};
+
+/*
+ * Struct used for linking a doorbell handle with an index in the
+ * notify bitmap. All fields in struct are aligned to their natural
+ * alignment.
+ */
+struct vmci_doorbell_link_msg {
+       struct vmci_datagram hdr;
+       struct vmci_handle handle;
+       u64 notify_idx;
+};
+
+/*
+ * Struct used for unlinking a doorbell handle from an index in the
+ * notify bitmap. All fields in struct are aligned to their natural
+ * alignment.
+ */
+struct vmci_doorbell_unlink_msg {
+       struct vmci_datagram hdr;
+       struct vmci_handle handle;
+};
+
+/*
+ * Struct used for generating a notification on a doorbell handle. All
+ * fields in struct are aligned to their natural alignment.
+ */
+struct vmci_doorbell_notify_msg {
+       struct vmci_datagram hdr;
+       struct vmci_handle handle;
+};
+
+/*
+ * This struct is used to contain data for events.  Size of this struct is a
+ * multiple of 8 bytes, and all fields are aligned to their natural alignment.
+ */
+struct vmci_event_data {
+       u32 event;              /* 4 bytes. */
+       u32 _pad;
+       /* Event payload is put here. */
+};
+
+/*
+ * Define the different VMCI_EVENT payload data types here.  All structs must
+ * be a multiple of 8 bytes, and fields must be aligned to their natural
+ * alignment.
+ */
+struct vmci_event_payld_ctx {
+       u32 context_id; /* 4 bytes. */
+       u32 _pad;
+};
+
+struct vmci_event_payld_qp {
+       struct vmci_handle handle;  /* queue_pair handle. */
+       u32 peer_id;        /* Context id of attaching/detaching VM. */
+       u32 _pad;
+};
+
+/*
+ * We define the following struct to get the size of the maximum event
+ * data the hypervisor may send to the guest.  If adding a new event
+ * payload type above, add it to the following struct too (inside the
+ * union).
+ */
+struct vmci_event_data_max {
+       struct vmci_event_data event_data;
+       union {
+               struct vmci_event_payld_ctx context_payload;
+               struct vmci_event_payld_qp qp_payload;
+       } ev_data_payload;
+};
+
+/*
+ * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
+ * VMCI_EVENT_HANDLER messages.  Struct size is 32 bytes.  All fields
+ * in struct are aligned to their natural alignment.
+ */
+struct vmci_event_msg {
+       struct vmci_datagram hdr;
+
+       /* Has event type and payload. */
+       struct vmci_event_data event_data;
+
+       /* Payload gets put here. */
+};
+
+/* Event with context payload. */
+struct vmci_event_ctx {
+       struct vmci_event_msg msg;
+       struct vmci_event_payld_ctx payload;
+};
+
+/* Event with QP payload. */
+struct vmci_event_qp {
+       struct vmci_event_msg msg;
+       struct vmci_event_payld_qp payload;
+};
+
+/*
+ * Structs used for queue_pair alloc and detach messages.  We align fields of
+ * these structs to 64bit boundaries.
+ */
+struct vmci_qp_alloc_msg {
+       struct vmci_datagram hdr;
+       struct vmci_handle handle;
+       u32 peer;
+       u32 flags;
+       u64 produce_size;
+       u64 consume_size;
+       u64 num_ppns;
+
+       /* List of PPNs placed here. */
+};
+
+struct vmci_qp_detach_msg {
+       struct vmci_datagram hdr;
+       struct vmci_handle handle;
+};
+
+/* VMCI Doorbell API. */
+#define VMCI_FLAG_DELAYED_CB 0x01
+
+typedef void (*vmci_callback) (void *client_data);
+
+/*
+ * struct vmci_qp - A vmw_vmci queue pair handle.
+ *
+ * This structure is used as a handle to a queue pair created by
+ * VMCI.  It is intentionally left opaque to clients.
+ */
+struct vmci_qp;
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_datagram_recv_cb) (void *client_data,
+                                     struct vmci_datagram *msg);
+
+/* VMCI Event API. */
+typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
+                              void *client_data);
+
+/*
+ * We use the following inline function to access the payload data
+ * associated with an event data.
+ */
+static inline const void *
+vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
+{
+       return (const char *)ev_data + sizeof(*ev_data);
+}
+
+static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
+{
+       return (void *)vmci_event_data_const_payload(ev_data);
+}
+
+/*
+ * Helper to add a given offset to a head or tail pointer. Wraps the
+ * value of the pointer around the max size of the queue.
+ */
+static inline void vmci_qp_add_pointer(atomic64_t *var,
+                                      size_t add,
+                                      u64 size)
+{
+       u64 new_val = atomic64_read(var);
+
+       if (new_val >= size - add)
+               new_val -= size;
+
+       new_val += add;
+
+       atomic64_set(var, new_val);
+}
+
+/*
+ * Helper routine to get the Producer Tail from the supplied queue.
+ */
+static inline u64
+vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
+{
+       struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
+       return atomic64_read(&qh->producer_tail);
+}
+
+/*
+ * Helper routine to get the Consumer Head from the supplied queue.
+ */
+static inline u64
+vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
+{
+       struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
+       return atomic64_read(&qh->consumer_head);
+}
+
+/*
+ * Helper routine to increment the Producer Tail.  Fundamentally,
+ * vmci_qp_add_pointer() is used to manipulate the tail itself.
+ */
+static inline void
+vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
+                               size_t add,
+                               u64 queue_size)
+{
+       vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
+}
+
+/*
+ * Helper routine to increment the Consumer Head.  Fundamentally,
+ * vmci_qp_add_pointer() is used to manipulate the head itself.
+ */
+static inline void
+vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
+                               size_t add,
+                               u64 queue_size)
+{
+       vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
+}
+
+/*
+ * Helper routine for getting the head and the tail pointer for a queue.
+ * Both the VMCIQueues are needed to get both the pointers for one queue.
+ */
+static inline void
+vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
+                          const struct vmci_queue_header *consume_q_header,
+                          u64 *producer_tail,
+                          u64 *consumer_head)
+{
+       if (producer_tail)
+               *producer_tail = vmci_q_header_producer_tail(produce_q_header);
+
+       if (consumer_head)
+               *consumer_head = vmci_q_header_consumer_head(consume_q_header);
+}
+
+static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
+                                     const struct vmci_handle handle)
+{
+       q_header->handle = handle;
+       atomic64_set(&q_header->producer_tail, 0);
+       atomic64_set(&q_header->consumer_head, 0);
+}
+
+/*
+ * Finds available free space in a produce queue to enqueue more
+ * data or reports an error if queue pair corruption is detected.
+ */
+static s64
+vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
+                        const struct vmci_queue_header *consume_q_header,
+                        const u64 produce_q_size)
+{
+       u64 tail;
+       u64 head;
+       u64 free_space;
+
+       tail = vmci_q_header_producer_tail(produce_q_header);
+       head = vmci_q_header_consumer_head(consume_q_header);
+
+       if (tail >= produce_q_size || head >= produce_q_size)
+               return VMCI_ERROR_INVALID_SIZE;
+
+       /*
+        * Deduct 1 to avoid tail becoming equal to head which causes
+        * ambiguity. If head and tail are equal it means that the
+        * queue is empty.
+        */
+       if (tail >= head)
+               free_space = produce_q_size - (tail - head) - 1;
+       else
+               free_space = head - tail - 1;
+
+       return free_space;
+}
+
+/*
+ * vmci_q_header_free_space() does all the heavy lifting of
+ * determing the number of free bytes in a Queue.  This routine,
+ * then subtracts that size from the full size of the Queue so
+ * the caller knows how many bytes are ready to be dequeued.
+ * Results:
+ * On success, available data size in bytes (up to MAX_INT64).
+ * On failure, appropriate error code.
+ */
+static inline s64
+vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
+                       const struct vmci_queue_header *produce_q_header,
+                       const u64 consume_q_size)
+{
+       s64 free_space;
+
+       free_space = vmci_q_header_free_space(consume_q_header,
+                                             produce_q_header, consume_q_size);
+       if (free_space < VMCI_SUCCESS)
+               return free_space;
+
+       return consume_q_size - free_space - 1;
+}
+
+
+#endif /* _VMW_VMCI_DEF_H_ */
index 6d9e15e..dd8c48d 100644 (file)
@@ -19,7 +19,7 @@
 
 struct cs4271_platform_data {
        int gpio_nreset;        /* GPIO driving Reset pin, if any */
-       int amutec_eq_bmutec:1; /* flag to enable AMUTEC=BMUTEC */
+       bool amutec_eq_bmutec;  /* flag to enable AMUTEC=BMUTEC */
 };
 
 #endif /* __CS4271_H */
index 769e27c..bc56738 100644 (file)
@@ -58,8 +58,9 @@
        .info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
        .put = snd_soc_put_volsw_range, \
        .private_value = (unsigned long)&(struct soc_mixer_control) \
-               {.reg = xreg, .shift = xshift, .min = xmin,\
-                .max = xmax, .platform_max = xmax, .invert = xinvert} }
+               {.reg = xreg, .rreg = xreg, .shift = xshift, \
+                .rshift = xshift,  .min = xmin, .max = xmax, \
+                .platform_max = xmax, .invert = xinvert} }
 #define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -88,8 +89,9 @@
        .info = snd_soc_info_volsw_range, \
        .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
        .private_value = (unsigned long)&(struct soc_mixer_control) \
-               {.reg = xreg, .shift = xshift, .min = xmin,\
-                .max = xmax, .platform_max = xmax, .invert = xinvert} }
+               {.reg = xreg, .rreg = xreg, .shift = xshift, \
+                .rshift = xshift, .min = xmin, .max = xmax, \
+                .platform_max = xmax, .invert = xinvert} }
 #define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
        .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
index 7cae236..663e34a 100644 (file)
@@ -174,6 +174,7 @@ typedef unsigned __bitwise__ sense_reason_t;
 
 enum tcm_sense_reason_table {
 #define R(x)   (__force sense_reason_t )(x)
+       TCM_NO_SENSE                            = R(0x00),
        TCM_NON_EXISTENT_LUN                    = R(0x01),
        TCM_UNSUPPORTED_SCSI_OPCODE             = R(0x02),
        TCM_INCORRECT_AMOUNT_OF_DATA            = R(0x03),
index 76352ac..9f096f1 100644 (file)
@@ -26,7 +26,6 @@
 
 #include <linux/types.h>
 #include <linux/elf-em.h>
-#include <linux/ptrace.h>
 
 /* The netlink messages for the audit system is divided into blocks:
  * 1000 - 1099 are for commanding the audit system
 #define AUDIT_MMAP             1323    /* Record showing descriptor and flags in mmap */
 #define AUDIT_NETFILTER_PKT    1324    /* Packets traversing netfilter chains */
 #define AUDIT_NETFILTER_CFG    1325    /* Netfilter chain modifications */
+#define AUDIT_SECCOMP          1326    /* Secure Computing event */
 
 #define AUDIT_AVC              1400    /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR      1401    /* Internal SE Linux Errors */
index 78f99d9..2c6c85f 100644 (file)
@@ -50,7 +50,8 @@
 #define PORT_LPC3220   22      /* NXP LPC32xx SoC "Standard" UART */
 #define PORT_8250_CIR  23      /* CIR infrared port, has its own driver */
 #define PORT_XR17V35X  24      /* Exar XR17V35x UARTs */
-#define PORT_MAX_8250  24      /* max port ID */
+#define PORT_BRCM_TRUMANAGE    24
+#define PORT_MAX_8250  25      /* max port ID */
 
 /*
  * ARM specific type numbers.  These are not currently guaranteed
index 7d30240..be8b7f5 100644 (file)
@@ -1182,7 +1182,7 @@ config CC_OPTIMIZE_FOR_SIZE
          Enabling this option will pass "-Os" instead of "-O2" to gcc
          resulting in a smaller kernel.
 
-         If unsure, say Y.
+         If unsure, say N.
 
 config SYSCTL
        bool
index 5e4ded5..f9acf71 100644 (file)
@@ -36,6 +36,10 @@ __setup("noinitrd", no_initrd);
 static int init_linuxrc(struct subprocess_info *info, struct cred *new)
 {
        sys_unshare(CLONE_FS | CLONE_FILES);
+       /* stdin/stdout/stderr for /linuxrc */
+       sys_open("/dev/console", O_RDWR, 0);
+       sys_dup(0);
+       sys_dup(0);
        /* move initrd over / and chdir/chroot in initrd root */
        sys_chdir("/root");
        sys_mount(".", "/", NULL, MS_MOVE, NULL);
index 85d69df..92d728a 100644 (file)
@@ -802,7 +802,7 @@ static int run_init_process(const char *init_filename)
                (const char __user *const __user *)envp_init);
 }
 
-static void __init kernel_init_freeable(void);
+static noinline void __init kernel_init_freeable(void);
 
 static int __ref kernel_init(void *unused)
 {
@@ -845,7 +845,7 @@ static int __ref kernel_init(void *unused)
              "See Linux Documentation/init.txt for guidance.");
 }
 
-static void __init kernel_init_freeable(void)
+static noinline void __init kernel_init_freeable(void)
 {
        /*
         * Wait until kthreadd is all set-up.
index 9d31183..6f34904 100644 (file)
@@ -86,18 +86,27 @@ static atomic_t entry_count;
  */
 static async_cookie_t  __lowest_in_progress(struct async_domain *running)
 {
+       async_cookie_t first_running = next_cookie;     /* infinity value */
+       async_cookie_t first_pending = next_cookie;     /* ditto */
        struct async_entry *entry;
 
+       /*
+        * Both running and pending lists are sorted but not disjoint.
+        * Take the first cookies from both and return the min.
+        */
        if (!list_empty(&running->domain)) {
                entry = list_first_entry(&running->domain, typeof(*entry), list);
-               return entry->cookie;
+               first_running = entry->cookie;
        }
 
-       list_for_each_entry(entry, &async_pending, list)
-               if (entry->running == running)
-                       return entry->cookie;
+       list_for_each_entry(entry, &async_pending, list) {
+               if (entry->running == running) {
+                       first_pending = entry->cookie;
+                       break;
+               }
+       }
 
-       return next_cookie;     /* "infinity" value */
+       return min(first_running, first_pending);
 }
 
 static async_cookie_t  lowest_in_progress(struct async_domain *running)
@@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work)
 {
        struct async_entry *entry =
                container_of(work, struct async_entry, work);
+       struct async_entry *pos;
        unsigned long flags;
        ktime_t uninitialized_var(calltime), delta, rettime;
        struct async_domain *running = entry->running;
 
-       /* 1) move self to the running queue */
+       /* 1) move self to the running queue, make sure it stays sorted */
        spin_lock_irqsave(&async_lock, flags);
-       list_move_tail(&entry->list, &running->domain);
+       list_for_each_entry_reverse(pos, &running->domain, list)
+               if (entry->cookie < pos->cookie)
+                       break;
+       list_move_tail(&entry->list, &pos->list);
        spin_unlock_irqrestore(&async_lock, flags);
 
        /* 2) run (and print duration) */
@@ -196,6 +209,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);
 
+       /* mark that this task has queued an async job, used by module init */
+       current->flags |= PF_USED_ASYNC;
+
        /* schedule for execution */
        queue_work(system_unbound_wq, &entry->work);
 
index 40414e9..d596e53 100644 (file)
@@ -272,6 +272,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
        int rc = 0;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+       if (unlikely(!ab))
+               return rc;
        audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
                         old, from_kuid(&init_user_ns, loginuid), sessionid);
        if (sid) {
@@ -619,6 +621,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        }
 
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+       if (unlikely(!*ab))
+               return rc;
        audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
                         task_tgid_vnr(current),
                         from_kuid(&init_user_ns, current_uid()),
@@ -1097,6 +1101,23 @@ static inline void audit_get_stamp(struct audit_context *ctx,
        }
 }
 
+/*
+ * Wait for auditd to drain the queue a little
+ */
+static void wait_for_auditd(unsigned long sleep_time)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&audit_backlog_wait, &wait);
+
+       if (audit_backlog_limit &&
+           skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+               schedule_timeout(sleep_time);
+
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&audit_backlog_wait, &wait);
+}
+
 /* Obtain an audit buffer.  This routine does locking to obtain the
  * audit buffer, but then no locking is required for calls to
  * audit_log_*format.  If the tsk is a task that is currently in a
@@ -1142,20 +1163,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
        while (audit_backlog_limit
               && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-               if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time
-                   && time_before(jiffies, timeout_start + audit_backlog_wait_time)) {
+               if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+                       unsigned long sleep_time;
 
-                       /* Wait for auditd to drain the queue a little */
-                       DECLARE_WAITQUEUE(wait, current);
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       add_wait_queue(&audit_backlog_wait, &wait);
-
-                       if (audit_backlog_limit &&
-                           skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
-                               schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies);
-
-                       __set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&audit_backlog_wait, &wait);
+                       sleep_time = timeout_start + audit_backlog_wait_time -
+                                       jiffies;
+                       if ((long)sleep_time > 0)
+                               wait_for_auditd(sleep_time);
                        continue;
                }
                if (audit_rate_check() && printk_ratelimit())
index e81175e..642a89c 100644 (file)
@@ -449,11 +449,26 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        return 0;
 }
 
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+       if (unlikely(!ab))
+               return;
+       audit_log_format(ab, "op=");
+       audit_log_string(ab, "remove rule");
+       audit_log_format(ab, " dir=");
+       audit_log_untrustedstring(ab, rule->tree->pathname);
+       audit_log_key(ab, rule->filterkey);
+       audit_log_format(ab, " list=%d res=1", rule->listnr);
+       audit_log_end(ab);
+}
+
 static void kill_rules(struct audit_tree *tree)
 {
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
-       struct audit_buffer *ab;
 
        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);
@@ -461,14 +476,7 @@ static void kill_rules(struct audit_tree *tree)
                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
-                       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-                       audit_log_format(ab, "op=");
-                       audit_log_string(ab, "remove rule");
-                       audit_log_format(ab, " dir=");
-                       audit_log_untrustedstring(ab, rule->tree->pathname);
-                       audit_log_key(ab, rule->filterkey);
-                       audit_log_format(ab, " list=%d res=1", rule->listnr);
-                       audit_log_end(ab);
+                       audit_log_remove_rule(rule);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
index 4a599f6..22831c4 100644 (file)
@@ -240,6 +240,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
        if (audit_enabled) {
                struct audit_buffer *ab;
                ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+               if (unlikely(!ab))
+                       return;
                audit_log_format(ab, "auid=%u ses=%u op=",
                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
                                 audit_get_sessionid(current));
index 7f19f23..f9fc54b 100644 (file)
@@ -1144,7 +1144,6 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
  * audit_receive_filter - apply all rules to the specified message type
  * @type: audit message type
  * @pid: target pid for netlink audit messages
- * @uid: target uid for netlink audit messages
  * @seq: netlink audit message sequence (serial) number
  * @data: payload data
  * @datasz: size of payload data
index e37e6a1..a371f85 100644 (file)
@@ -1464,14 +1464,14 @@ static void show_special(struct audit_context *context, int *call_panic)
                        audit_log_end(ab);
                        ab = audit_log_start(context, GFP_KERNEL,
                                             AUDIT_IPC_SET_PERM);
+                       if (unlikely(!ab))
+                               return;
                        audit_log_format(ab,
                                "qbytes=%lx ouid=%u ogid=%u mode=%#ho",
                                context->ipc.qbytes,
                                context->ipc.perm_uid,
                                context->ipc.perm_gid,
                                context->ipc.perm_mode);
-                       if (!ab)
-                               return;
                }
                break; }
        case AUDIT_MQ_OPEN: {
@@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags)
        context->type = AUDIT_MMAP;
 }
 
-static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+static void audit_log_task(struct audit_buffer *ab)
 {
        kuid_t auid, uid;
        kgid_t gid;
@@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
        audit_log_task_context(ab);
        audit_log_format(ab, " pid=%d comm=", current->pid);
        audit_log_untrustedstring(ab, current->comm);
+}
+
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+       audit_log_task(ab);
        audit_log_format(ab, " reason=");
        audit_log_string(ab, reason);
        audit_log_format(ab, " sig=%ld", signr);
@@ -2715,6 +2720,8 @@ void audit_core_dumps(long signr)
                return;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+       if (unlikely(!ab))
+               return;
        audit_log_abend(ab, "memory violation", signr);
        audit_log_end(ab);
 }
@@ -2723,8 +2730,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
 {
        struct audit_buffer *ab;
 
-       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-       audit_log_abend(ab, "seccomp", signr);
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+       if (unlikely(!ab))
+               return;
+       audit_log_task(ab);
+       audit_log_format(ab, " sig=%ld", signr);
        audit_log_format(ab, " syscall=%ld", syscall);
        audit_log_format(ab, " compat=%d", is_compat_task());
        audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
index f6150e9..36700e9 100644 (file)
@@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
        return 0;
 }
 
-asmlinkage long
-compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
-       struct compat_rusage __user *ru)
+COMPAT_SYSCALL_DEFINE4(wait4,
+       compat_pid_t, pid,
+       compat_uint_t __user *, stat_addr,
+       int, options,
+       struct compat_rusage __user *, ru)
 {
        if (!ru) {
                return sys_wait4(pid, stat_addr, options, NULL);
@@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
        }
 }
 
-asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
-               struct compat_siginfo __user *uinfo, int options,
-               struct compat_rusage __user *uru)
+COMPAT_SYSCALL_DEFINE5(waitid,
+               int, which, compat_pid_t, pid,
+               struct compat_siginfo __user *, uinfo, int, options,
+               struct compat_rusage __user *, uru)
 {
        siginfo_t info;
        struct rusage ru;
@@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
                return ret;
 
        if (uru) {
-               ret = put_compat_rusage(&ru, uru);
+               /* sys_waitid() overwrites everything in ru */
+               if (COMPAT_USE_64BIT_TIME)
+                       ret = copy_to_user(uru, &ru, sizeof(ru));
+               else
+                       ret = put_compat_rusage(&ru, uru);
                if (ret)
                        return ret;
        }
@@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
        sigset_from_compat(&s, &s32);
 
        if (uts) {
-               if (get_compat_timespec(&t, uts))
+               if (compat_get_timespec(&t, uts))
                        return -EFAULT;
        }
 
index 4d5f8d5..8875254 100644 (file)
@@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv)
 
        kdb_printf("Module                  Size  modstruct     Used by\n");
        list_for_each_entry(mod, kdb_modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
 
                kdb_printf("%-20s%8u  0x%p ", mod->name,
                           mod->core_size, (void *)mod);
index 65ca6d2..c535f33 100644 (file)
@@ -1668,8 +1668,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                 int, tls_val)
 #endif
 {
-       return do_fork(clone_flags, newsp, 0,
-               parent_tidptr, child_tidptr);
+       long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
+       asmlinkage_protect(5, ret, clone_flags, newsp,
+                       parent_tidptr, child_tidptr, tls_val);
+       return ret;
 }
 #endif
 
index 250092c..eab0827 100644 (file)
@@ -188,6 +188,7 @@ struct load_info {
    ongoing or failed initialization etc. */
 static inline int strong_try_module_get(struct module *mod)
 {
+       BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
        if (mod && mod->state == MODULE_STATE_COMING)
                return -EBUSY;
        if (try_module_get(mod))
@@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
                };
 
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+
                if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
                        return true;
        }
@@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,
 EXPORT_SYMBOL_GPL(find_symbol);
 
 /* Search for module by name: must hold module_mutex. */
-struct module *find_module(const char *name)
+static struct module *find_module_all(const char *name,
+                                     bool even_unformed)
 {
        struct module *mod;
 
        list_for_each_entry(mod, &modules, list) {
+               if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (strcmp(mod->name, name) == 0)
                        return mod;
        }
        return NULL;
 }
+
+struct module *find_module(const char *name)
+{
+       return find_module_all(name, false);
+}
 EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
@@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)
        preempt_disable();
 
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (!mod->percpu_size)
                        continue;
                for_each_possible_cpu(cpu) {
@@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,
        case MODULE_STATE_GOING:
                state = "going";
                break;
+       default:
+               BUG();
        }
        return sprintf(buffer, "%s\n", state);
 }
@@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void)
 
        mutex_lock(&module_mutex);
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if ((mod->module_core) && (mod->core_text_size)) {
                        set_page_attributes(mod->module_core,
                                                mod->module_core + mod->core_text_size,
@@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void)
 
        mutex_lock(&module_mutex);
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if ((mod->module_core) && (mod->core_text_size)) {
                        set_page_attributes(mod->module_core,
                                                mod->module_core + mod->core_text_size,
@@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info)
                err = -EFBIG;
                goto out;
        }
+
+       /* Don't hand 0 to vmalloc, it whines. */
+       if (stat.size == 0) {
+               err = -EINVAL;
+               goto out;
+       }
+
        info->hdr = vmalloc(stat.size);
        if (!info->hdr) {
                err = -ENOMEM;
@@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name)
        bool ret;
 
        mutex_lock(&module_mutex);
-       mod = find_module(name);
-       ret = !mod || mod->state != MODULE_STATE_COMING;
+       mod = find_module_all(name, true);
+       ret = !mod || mod->state == MODULE_STATE_LIVE
+               || mod->state == MODULE_STATE_GOING;
        mutex_unlock(&module_mutex);
 
        return ret;
@@ -3013,6 +3041,12 @@ static int do_init_module(struct module *mod)
 {
        int ret = 0;
 
+       /*
+        * We want to find out whether @mod uses async during init.  Clear
+        * PF_USED_ASYNC.  async_schedule*() will set it.
+        */
+       current->flags &= ~PF_USED_ASYNC;
+
        blocking_notifier_call_chain(&module_notify_list,
                        MODULE_STATE_COMING, mod);
 
@@ -3058,8 +3092,25 @@ static int do_init_module(struct module *mod)
        blocking_notifier_call_chain(&module_notify_list,
                                     MODULE_STATE_LIVE, mod);
 
-       /* We need to finish all async code before the module init sequence is done */
-       async_synchronize_full();
+       /*
+        * We need to finish all async code before the module init sequence
+        * is done.  This has potential to deadlock.  For example, a newly
+        * detected block device can trigger request_module() of the
+        * default iosched from async probing task.  Once userland helper
+        * reaches here, async_synchronize_full() will wait on the async
+        * task waiting on request_module() and deadlock.
+        *
+        * This deadlock is avoided by perfomring async_synchronize_full()
+        * iff module init queued any async jobs.  This isn't a full
+        * solution as it will deadlock the same if module loading from
+        * async jobs nests more than once; however, due to the various
+        * constraints, this hack seems to be the best option for now.
+        * Please refer to the following thread for details.
+        *
+        * http://thread.gmane.org/gmane.linux.kernel/1420814
+        */
+       if (current->flags & PF_USED_ASYNC)
+               async_synchronize_full();
 
        mutex_lock(&module_mutex);
        /* Drop initial reference. */
@@ -3113,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs,
                goto free_copy;
        }
 
+       /*
+        * We try to place it in the list now to make sure it's unique
+        * before we dedicate too many resources.  In particular,
+        * temporary percpu memory exhaustion.
+        */
+       mod->state = MODULE_STATE_UNFORMED;
+again:
+       mutex_lock(&module_mutex);
+       if ((old = find_module_all(mod->name, true)) != NULL) {
+               if (old->state == MODULE_STATE_COMING
+                   || old->state == MODULE_STATE_UNFORMED) {
+                       /* Wait in case it fails to load. */
+                       mutex_unlock(&module_mutex);
+                       err = wait_event_interruptible(module_wq,
+                                              finished_loading(mod->name));
+                       if (err)
+                               goto free_module;
+                       goto again;
+               }
+               err = -EEXIST;
+               mutex_unlock(&module_mutex);
+               goto free_module;
+       }
+       list_add_rcu(&mod->list, &modules);
+       mutex_unlock(&module_mutex);
+
 #ifdef CONFIG_MODULE_SIG
        mod->sig_ok = info->sig_ok;
        if (!mod->sig_ok)
@@ -3122,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
        /* Now module is in final location, initialize linked lists, etc. */
        err = module_unload_init(mod);
        if (err)
-               goto free_module;
+               goto unlink_mod;
 
        /* Now we've got everything in the final locations, we can
         * find optional sections. */
@@ -3157,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs,
                goto free_arch_cleanup;
        }
 
-       /* Mark state as coming so strong_try_module_get() ignores us. */
-       mod->state = MODULE_STATE_COMING;
-
-       /* Now sew it into the lists so we can get lockdep and oops
-        * info during argument parsing.  No one should access us, since
-        * strong_try_module_get() will fail.
-        * lockdep/oops can run asynchronous, so use the RCU list insertion
-        * function to insert in a way safe to concurrent readers.
-        * The mutex protects against concurrent writers.
-        */
-again:
-       mutex_lock(&module_mutex);
-       if ((old = find_module(mod->name)) != NULL) {
-               if (old->state == MODULE_STATE_COMING) {
-                       /* Wait in case it fails to load. */
-                       mutex_unlock(&module_mutex);
-                       err = wait_event_interruptible(module_wq,
-                                              finished_loading(mod->name));
-                       if (err)
-                               goto free_arch_cleanup;
-                       goto again;
-               }
-               err = -EEXIST;
-               goto unlock;
-       }
-
-       /* This has to be done once we're sure module name is unique. */
        dynamic_debug_setup(info->debug, info->num_debug);
 
-       /* Find duplicate symbols */
+       mutex_lock(&module_mutex);
+       /* Find duplicate symbols (must be called under lock). */
        err = verify_export_symbols(mod);
        if (err < 0)
-               goto ddebug;
+               goto ddebug_cleanup;
 
+       /* This relies on module_mutex for list integrity. */
        module_bug_finalize(info->hdr, info->sechdrs, mod);
-       list_add_rcu(&mod->list, &modules);
+
+       /* Mark state as coming so strong_try_module_get() ignores us,
+        * but kallsyms etc. can see us. */
+       mod->state = MODULE_STATE_COMING;
+
        mutex_unlock(&module_mutex);
 
        /* Module is ready to execute: parsing args may do that. */
        err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
                         -32768, 32767, &ddebug_dyndbg_module_param_cb);
        if (err < 0)
-               goto unlink;
+               goto bug_cleanup;
 
        /* Link in to syfs. */
        err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
        if (err < 0)
-               goto unlink;
+               goto bug_cleanup;
 
        /* Get rid of temporary copy. */
        free_copy(info);
@@ -3214,16 +3270,13 @@ again:
 
        return do_init_module(mod);
 
- unlink:
+ bug_cleanup:
+       /* module_bug_cleanup needs module_mutex protection */
        mutex_lock(&module_mutex);
-       /* Unlink carefully: kallsyms could be walking list. */
-       list_del_rcu(&mod->list);
        module_bug_cleanup(mod);
-       wake_up_all(&module_wq);
- ddebug:
-       dynamic_debug_remove(info->debug);
- unlock:
+ ddebug_cleanup:
        mutex_unlock(&module_mutex);
+       dynamic_debug_remove(info->debug);
        synchronize_sched();
        kfree(mod->args);
  free_arch_cleanup:
@@ -3232,6 +3285,12 @@ again:
        free_modinfo(mod);
  free_unload:
        module_unload_free(mod);
+ unlink_mod:
+       mutex_lock(&module_mutex);
+       /* Unlink carefully: kallsyms could be walking list. */
+       list_del_rcu(&mod->list);
+       wake_up_all(&module_wq);
+       mutex_unlock(&module_mutex);
  free_module:
        module_deallocate(mod, info);
  free_copy:
@@ -3354,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_init(addr, mod) ||
                    within_module_core(addr, mod)) {
                        if (modname)
@@ -3377,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_init(addr, mod) ||
                    within_module_core(addr, mod)) {
                        const char *sym;
@@ -3401,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_init(addr, mod) ||
                    within_module_core(addr, mod)) {
                        const char *sym;
@@ -3428,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (symnum < mod->num_symtab) {
                        *value = mod->symtab[symnum].st_value;
                        *type = mod->symtab[symnum].st_info;
@@ -3470,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)
                        ret = mod_find_symname(mod, colon+1);
                *colon = ':';
        } else {
-               list_for_each_entry_rcu(mod, &modules, list)
+               list_for_each_entry_rcu(mod, &modules, list) {
+                       if (mod->state == MODULE_STATE_UNFORMED)
+                               continue;
                        if ((ret = mod_find_symname(mod, name)) != 0)
                                break;
+               }
        }
        preempt_enable();
        return ret;
@@ -3487,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        int ret;
 
        list_for_each_entry(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                for (i = 0; i < mod->num_symtab; i++) {
                        ret = fn(data, mod->strtab + mod->symtab[i].st_name,
                                 mod, mod->symtab[i].st_value);
@@ -3502,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)
 {
        int bx = 0;
 
+       BUG_ON(mod->state == MODULE_STATE_UNFORMED);
        if (mod->taints ||
            mod->state == MODULE_STATE_GOING ||
            mod->state == MODULE_STATE_COMING) {
@@ -3543,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)
        struct module *mod = list_entry(p, struct module, list);
        char buf[8];
 
+       /* We always ignore unformed modules. */
+       if (mod->state == MODULE_STATE_UNFORMED)
+               return 0;
+
        seq_printf(m, "%s %u",
                   mod->name, mod->init_size + mod->core_size);
        print_unload_info(m, mod);
@@ -3603,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (mod->num_exentries == 0)
                        continue;
 
@@ -3651,10 +3730,13 @@ struct module *__module_address(unsigned long addr)
        if (addr < module_addr_min || addr > module_addr_max)
                return NULL;
 
-       list_for_each_entry_rcu(mod, &modules, list)
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_core(addr, mod)
                    || within_module_init(addr, mod))
                        return mod;
+       }
        return NULL;
 }
 EXPORT_SYMBOL_GPL(__module_address);
@@ -3707,8 +3789,11 @@ void print_modules(void)
        printk(KERN_DEFAULT "Modules linked in:");
        /* Most callers should already have preempt disabled, but make sure */
        preempt_disable();
-       list_for_each_entry_rcu(mod, &modules, list)
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                printk(" %s%s", mod->name, module_flags(mod, buf));
+       }
        preempt_enable();
        if (last_unloaded_module[0])
                printk(" [last unloaded: %s]", last_unloaded_module);
index 1599157..6cbeaae 100644 (file)
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-               signal_wake_up(child, task_is_traced(child));
+               ptrace_signal_wake_up(child, true);
 
        spin_unlock(&child->sighand->siglock);
 }
 
+/* Ensure that nothing can wake it up, even SIGKILL */
+static bool ptrace_freeze_traced(struct task_struct *task)
+{
+       bool ret = false;
+
+       /* Lockless, nobody but us can set this flag */
+       if (task->jobctl & JOBCTL_LISTENING)
+               return ret;
+
+       spin_lock_irq(&task->sighand->siglock);
+       if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+               task->state = __TASK_TRACED;
+               ret = true;
+       }
+       spin_unlock_irq(&task->sighand->siglock);
+
+       return ret;
+}
+
+static void ptrace_unfreeze_traced(struct task_struct *task)
+{
+       if (task->state != __TASK_TRACED)
+               return;
+
+       WARN_ON(!task->ptrace || task->parent != current);
+
+       spin_lock_irq(&task->sighand->siglock);
+       if (__fatal_signal_pending(task))
+               wake_up_state(task, __TASK_TRACED);
+       else
+               task->state = TASK_TRACED;
+       spin_unlock_irq(&task->sighand->siglock);
+}
+
 /**
  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
  * @child: ptracee to check for
@@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child)
  * RETURNS:
  * 0 on success, -ESRCH if %child is not ready.
  */
-int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 {
        int ret = -ESRCH;
 
@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
-       if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+       if (child->ptrace && child->parent == current) {
+               WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
-               spin_lock_irq(&child->sighand->siglock);
-               WARN_ON_ONCE(task_is_stopped(child));
-               if (ignore_state || (task_is_traced(child) &&
-                                    !(child->jobctl & JOBCTL_LISTENING)))
+               if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
-               spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
 
-       if (!ret && !ignore_state)
-               ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+       if (!ret && !ignore_state) {
+               if (!wait_task_inactive(child, __TASK_TRACED)) {
+                       /*
+                        * This can only happen if may_ptrace_stop() fails and
+                        * ptrace_stop() changes ->state back to TASK_RUNNING,
+                        * so we should not worry about leaking __TASK_TRACED.
+                        */
+                       WARN_ON(child->state == __TASK_TRACED);
+                       ret = -ESRCH;
+               }
+       }
 
-       /* All systems go.. */
        return ret;
 }
 
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-               signal_wake_up(task, 1);
+               signal_wake_up_state(task, __TASK_STOPPED);
 
        spin_unlock(&task->sighand->siglock);
 
@@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request,
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-                       signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+                       ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 
                unlock_task_sighand(child, &flags);
                ret = 0;
@@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request,
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-                               signal_wake_up(child, true);
+                               ptrace_signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
@@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                goto out_put_task_struct;
 
        ret = arch_ptrace(child, request, addr, data);
+       if (ret || request != PTRACE_DETACH)
+               ptrace_unfreeze_traced(child);
 
  out_put_task_struct:
        put_task_struct(child);
@@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 
        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
-       if (!ret)
+       if (!ret) {
                ret = compat_arch_ptrace(child, request, addr, data);
+               if (ret || request != PTRACE_DETACH)
+                       ptrace_unfreeze_traced(child);
+       }
 
  out_put_task_struct:
        put_task_struct(child);
index 6850f53..b3c6c3f 100644 (file)
@@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 
 EXPORT_SYMBOL(down_read_nested);
 
+void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
+{
+       might_sleep();
+       rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
+
+       LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
+}
+
+EXPORT_SYMBOL(_down_write_nest_lock);
+
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
        might_sleep();
index 257002c..26058d0 100644 (file)
@@ -1523,7 +1523,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-       return try_to_wake_up(p, TASK_ALL, 0);
+       WARN_ON(task_is_stopped_or_traced(p));
+       return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
index 372771e..3d09cf6 100644 (file)
@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-       unsigned int mask;
-
        set_tsk_thread_flag(t, TIF_SIGPENDING);
-
        /*
-        * For SIGKILL, we want to wake it up in the stopped/traced/killable
+        * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
-       mask = TASK_INTERRUPTIBLE;
-       if (resume)
-               mask |= TASK_WAKEKILL;
-       if (!wake_up_state(t, mask))
+       if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
 }
 
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
        assert_spin_locked(&t->sighand->siglock);
 
        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-       signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+       ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }
 
 /*
@@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void)
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
+        *
+        * This is almost outdated, a task with the pending SIGKILL can't
+        * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+        * after SIGKILL was already dequeued.
         */
        if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
@@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);
 
+               /* tasklist protects us from ptrace_freeze_traced() */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
@@ -3116,8 +3115,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
 
 #ifdef CONFIG_COMPAT
 #ifdef CONFIG_GENERIC_SIGALTSTACK
-asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
-                                      compat_stack_t __user *uoss_ptr)
+COMPAT_SYSCALL_DEFINE2(sigaltstack,
+                       const compat_stack_t __user *, uss_ptr,
+                       compat_stack_t __user *, uoss_ptr)
 {
        stack_t uss, uoss;
        int ret;
index 3ffe4c5..41473b4 100644 (file)
@@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self,
 
 struct notifier_block ftrace_module_nb = {
        .notifier_call = ftrace_module_notify,
-       .priority = 0,
+       .priority = INT_MAX,    /* Run before anything that can use kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];
index e512567..3c13e46 100644 (file)
@@ -2899,6 +2899,8 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
 
+       buf[cnt] = 0;
+
        trace_set_options(buf);
 
        *ppos += cnt;
@@ -3452,7 +3454,7 @@ static int tracing_wait_pipe(struct file *filp)
                        return -EINTR;
 
                /*
-                * We block until we read something and tracing is enabled.
+                * We block until we read something and tracing is disabled.
                 * We still block if tracing is disabled, but we have never
                 * read anything. This allows a user to cat this file, and
                 * then enable tracing. But after we have read something,
@@ -3460,7 +3462,7 @@ static int tracing_wait_pipe(struct file *filp)
                 *
                 * iter->pos will be 0 if we haven't read anything.
                 */
-               if (tracing_is_enabled() && iter->pos)
+               if (!tracing_is_enabled() && iter->pos)
                        break;
        }
 
@@ -4815,10 +4817,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                return ret;
 
        if (buffer) {
-               if (val)
+               mutex_lock(&trace_types_lock);
+               if (val) {
                        ring_buffer_record_on(buffer);
-               else
+                       if (current_trace->start)
+                               current_trace->start(tr);
+               } else {
                        ring_buffer_record_off(buffer);
+                       if (current_trace->stop)
+                               current_trace->stop(tr);
+               }
+               mutex_unlock(&trace_types_lock);
        }
 
        (*ppos)++;
index a28c141..d0cdf14 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 }
 
 #ifdef CONFIG_MODULES
+/* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
index 145dec5..5fbed5c 100644 (file)
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
        if (!rmap)
                return NULL;
 
+       kref_init(&rmap->refcount);
        rmap->obj = (void **)((char *)rmap + obj_offset);
 
        /* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+       struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+       kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+       kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+       return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
                glue = rmap->obj[index];
                irq_set_affinity_notifier(glue->notify.irq, NULL);
        }
-       irq_run_affinity_notifiers();
 
-       kfree(rmap);
+       cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
                pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
        struct irq_glue *glue =
                container_of(ref, struct irq_glue, notify.kref);
+
+       cpu_rmap_put(glue->rmap);
        kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
        glue->notify.notify = irq_cpu_rmap_notify;
        glue->notify.release = irq_cpu_rmap_release;
        glue->rmap = rmap;
+       cpu_rmap_get(rmap);
        glue->index = cpu_rmap_add(rmap, glue);
        rc = irq_set_affinity_notifier(irq, &glue->notify);
-       if (rc)
+       if (rc) {
+               cpu_rmap_put(glue->rmap);
                kfree(glue);
+       }
        return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
index 4f56a11..c0e31fe 100644 (file)
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
        }
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
        struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
                }
        }
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       ____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-       rb_erase_augmented(node, root, &dummy_callbacks);
+       struct rb_node *rebalance;
+       rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+       if (rebalance)
+               ____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
 
index 1324cd7..b93376c 100644 (file)
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
        while (start < end) {
                unsigned long *map, idx, vec;
+               unsigned shift;
 
                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
+               shift = idx & (BITS_PER_LONG - 1);
+               /*
+                * vec holds at most BITS_PER_LONG map bits,
+                * bit 0 corresponds to start.
+                */
                vec = ~map[idx / BITS_PER_LONG];
+
+               if (shift) {
+                       vec >>= shift;
+                       if (end - start >= BITS_PER_LONG)
+                               vec |= ~map[idx / BITS_PER_LONG + 1] <<
+                                       (BITS_PER_LONG - shift);
+               }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
-                       unsigned long off = 0;
+                       unsigned long cur = start;
 
-                       vec >>= start & (BITS_PER_LONG - 1);
-                       while (vec) {
+                       start = ALIGN(start + 1, BITS_PER_LONG);
+                       while (vec && cur != start) {
                                if (vec & 1) {
-                                       page = pfn_to_page(start + off);
+                                       page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
-                               off++;
+                               ++cur;
                        }
-                       start = ALIGN(start + 1, BITS_PER_LONG);
                }
        }
 
index 6b807e4..c62bd06 100644 (file)
@@ -816,6 +816,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
+       unsigned int order;
        unsigned long watermark;
 
        if (fatal_signal_pending(current))
@@ -850,22 +851,16 @@ static int compact_finished(struct zone *zone,
                return COMPACT_CONTINUE;
 
        /* Direct compactor: Is a suitable page free? */
-       if (cc->page) {
-               /* Was a suitable page captured? */
-               if (*cc->page)
+       for (order = cc->order; order < MAX_ORDER; order++) {
+               struct free_area *area = &zone->free_area[order];
+
+               /* Job done if page is free of the right migratetype */
+               if (!list_empty(&area->free_list[cc->migratetype]))
+                       return COMPACT_PARTIAL;
+
+               /* Job done if allocation would set block type */
+               if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
-       } else {
-               unsigned int order;
-               for (order = cc->order; order < MAX_ORDER; order++) {
-                       struct free_area *area = &zone->free_area[cc->order];
-                       /* Job done if page is free of the right migratetype */
-                       if (!list_empty(&area->free_list[cc->migratetype]))
-                               return COMPACT_PARTIAL;
-
-                       /* Job done if allocation would set block type */
-                       if (cc->order >= pageblock_order && area->nr_free)
-                               return COMPACT_PARTIAL;
-               }
        }
 
        return COMPACT_CONTINUE;
@@ -921,60 +916,6 @@ unsigned long compaction_suitable(struct zone *zone, int order)
        return COMPACT_CONTINUE;
 }
 
-static void compact_capture_page(struct compact_control *cc)
-{
-       unsigned long flags;
-       int mtype, mtype_low, mtype_high;
-
-       if (!cc->page || *cc->page)
-               return;
-
-       /*
-        * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-        * regardless of the migratetype of the freelist is is captured from.
-        * This is fine because the order for a high-order MIGRATE_MOVABLE
-        * allocation is typically at least a pageblock size and overall
-        * fragmentation is not impaired. Other allocation types must
-        * capture pages from their own migratelist because otherwise they
-        * could pollute other pageblocks like MIGRATE_MOVABLE with
-        * difficult to move pages and making fragmentation worse overall.
-        */
-       if (cc->migratetype == MIGRATE_MOVABLE) {
-               mtype_low = 0;
-               mtype_high = MIGRATE_PCPTYPES;
-       } else {
-               mtype_low = cc->migratetype;
-               mtype_high = cc->migratetype + 1;
-       }
-
-       /* Speculatively examine the free lists without zone lock */
-       for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-               int order;
-               for (order = cc->order; order < MAX_ORDER; order++) {
-                       struct page *page;
-                       struct free_area *area;
-                       area = &(cc->zone->free_area[order]);
-                       if (list_empty(&area->free_list[mtype]))
-                               continue;
-
-                       /* Take the lock and attempt capture of the page */
-                       if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-                               return;
-                       if (!list_empty(&area->free_list[mtype])) {
-                               page = list_entry(area->free_list[mtype].next,
-                                                       struct page, lru);
-                               if (capture_free_page(page, cc->order, mtype)) {
-                                       spin_unlock_irqrestore(&cc->zone->lock,
-                                                                       flags);
-                                       *cc->page = page;
-                                       return;
-                               }
-                       }
-                       spin_unlock_irqrestore(&cc->zone->lock, flags);
-               }
-       }
-}
-
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
        int ret;
@@ -1054,9 +995,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                                goto out;
                        }
                }
-
-               /* Capture a page now if it is a suitable size */
-               compact_capture_page(cc);
        }
 
 out:
@@ -1069,8 +1007,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                bool sync, bool *contended,
-                                struct page **page)
+                                bool sync, bool *contended)
 {
        unsigned long ret;
        struct compact_control cc = {
@@ -1080,7 +1017,6 @@ static unsigned long compact_zone_order(struct zone *zone,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
-               .page = page,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
@@ -1110,7 +1046,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
-                       bool sync, bool *contended, struct page **page)
+                       bool sync, bool *contended)
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1136,7 +1072,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                int status;
 
                status = compact_zone_order(zone, order, gfp_mask, sync,
-                                               contended, page);
+                                               contended);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
@@ -1192,7 +1128,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
        struct compact_control cc = {
                .order = order,
                .sync = false,
-               .page = NULL,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -1203,14 +1138,13 @@ static int compact_node(int nid)
        struct compact_control cc = {
                .order = -1,
                .sync = true,
-               .page = NULL,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
 }
 
 /* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
 {
        int nid;
 
@@ -1219,8 +1153,6 @@ static int compact_nodes(void)
 
        for_each_online_node(nid)
                compact_node(nid);
-
-       return COMPACT_COMPLETE;
 }
 
 /* The written value is actually unused, all memory is compacted */
@@ -1231,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
 {
        if (write)
-               return compact_nodes();
+               compact_nodes();
 
        return 0;
 }
index 9e894ed..6001ee6 100644 (file)
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
 
        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
        BUG_ON(!PageAnon(page));
-       anon_vma = page_lock_anon_vma_read(page);
+
+       /*
+        * The caller does not necessarily hold an mmap_sem that would prevent
+        * the anon_vma disappearing so we first we take a reference to it
+        * and then lock the anon_vma for write. This is similar to
+        * page_lock_anon_vma_read except the write lock is taken to serialise
+        * against parallel split or collapse operations.
+        */
+       anon_vma = page_get_anon_vma(page);
        if (!anon_vma)
                goto out;
+       anon_vma_lock_write(anon_vma);
+
        ret = 0;
        if (!PageCompound(page))
                goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       page_unlock_anon_vma_read(anon_vma);
+       anon_vma_unlock(anon_vma);
+       put_anon_vma(anon_vma);
 out:
        return ret;
 }
index d597f94..9ba2110 100644 (file)
@@ -135,7 +135,6 @@ struct compact_control {
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
        bool contended;                 /* True if a lock was contended */
-       struct page **page;             /* Page captured of requested size */
 };
 
 unsigned long
index 6259055..88adc8a 100644 (file)
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
                }
 
                this->size += next->size;
-               memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+               /* move forward from next + 1, index of which is i + 2 */
+               memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
 }
index 3b676b0..c387786 100644 (file)
@@ -1679,9 +1679,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        page_xchg_last_nid(new_page, page_last_nid(page));
 
        isolated = numamigrate_isolate_page(pgdat, page);
-       if (!isolated) {
+
+       /*
+        * Failing to isolate or a GUP pin prevents migration. The expected
+        * page count is 2. 1 for anonymous pages without a mapping and 1
+        * for the callers pin. If the page was isolated, the page will
+        * need to be put back on the LRU.
+        */
+       if (!isolated || page_count(page) != 2) {
                count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
                put_page(new_page);
+               if (isolated) {
+                       putback_lru_page(page);
+                       isolated = 0;
+                       goto out;
+               }
                goto out_keep_locked;
        }
 
index f54b235..35730ee 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2886,7 +2886,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               down_write(&anon_vma->root->rwsem);
+               down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
                 * anon_vma->root->rwsem. If some other vma in this mm shares
index bc6cc0e..df2022f 100644 (file)
@@ -1384,14 +1384,8 @@ void split_page(struct page *page, unsigned int order)
                set_page_refcounted(page + i);
 }
 
-/*
- * Similar to the split_page family of functions except that the page
- * required at the given order and being isolated now to prevent races
- * with parallel allocators
- */
-int capture_free_page(struct page *page, int alloc_order, int migratetype)
+static int __isolate_free_page(struct page *page, unsigned int order)
 {
-       unsigned int order;
        unsigned long watermark;
        struct zone *zone;
        int mt;
@@ -1399,7 +1393,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
        BUG_ON(!PageBuddy(page));
 
        zone = page_zone(page);
-       order = page_order(page);
        mt = get_pageblock_migratetype(page);
 
        if (mt != MIGRATE_ISOLATE) {
@@ -1408,7 +1401,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
                if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                        return 0;
 
-               __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
+               __mod_zone_freepage_state(zone, -(1UL << order), mt);
        }
 
        /* Remove page from free list */
@@ -1416,11 +1409,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
 
-       if (alloc_order != order)
-               expand(zone, page, alloc_order, order,
-                       &zone->free_area[order], migratetype);
-
-       /* Set the pageblock if the captured page is at least a pageblock */
+       /* Set the pageblock if the isolated page is at least a pageblock */
        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
                for (; page < endpage; page += pageblock_nr_pages) {
@@ -1431,7 +1420,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
                }
        }
 
-       return 1UL << alloc_order;
+       return 1UL << order;
 }
 
 /*
@@ -1449,10 +1438,9 @@ int split_free_page(struct page *page)
        unsigned int order;
        int nr_pages;
 
-       BUG_ON(!PageBuddy(page));
        order = page_order(page);
 
-       nr_pages = capture_free_page(page, order, 0);
+       nr_pages = __isolate_free_page(page, order);
        if (!nr_pages)
                return 0;
 
@@ -2136,8 +2124,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        bool *contended_compaction, bool *deferred_compaction,
        unsigned long *did_some_progress)
 {
-       struct page *page = NULL;
-
        if (!order)
                return NULL;
 
@@ -2149,16 +2135,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        current->flags |= PF_MEMALLOC;
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
                                                nodemask, sync_migration,
-                                               contended_compaction, &page);
+                                               contended_compaction);
        current->flags &= ~PF_MEMALLOC;
 
-       /* If compaction captured a page, prep and use it */
-       if (page) {
-               prep_new_page(page, order, gfp_mask);
-               goto got_page;
-       }
-
        if (*did_some_progress != COMPACT_SKIPPED) {
+               struct page *page;
+
                /* Page migration frees to the PCP lists but we want merging */
                drain_pages(get_cpu());
                put_cpu();
@@ -2168,7 +2150,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                                alloc_flags & ~ALLOC_NO_WATERMARKS,
                                preferred_zone, migratetype);
                if (page) {
-got_page:
                        preferred_zone->compact_blockskip_flush = false;
                        preferred_zone->compact_considered = 0;
                        preferred_zone->compact_defer_shift = 0;
@@ -5604,7 +5585,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-       pfn = pfn - zone->zone_start_pfn;
+       pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
index 515473e..f64e439 100644 (file)
@@ -6121,6 +6121,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 
 static const struct ethtool_ops default_ethtool_ops;
 
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                   const struct ethtool_ops *ops)
+{
+       if (dev->ethtool_ops == &default_ethtool_ops)
+               dev->ethtool_ops = ops;
+}
+EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
+
 /**
  *     alloc_netdev_mqs - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
index 3c9d208..d9c4f11 100644 (file)
@@ -590,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_TTL:
                if (optlen < 1)
                        goto e_inval;
-               if (val != -1 && (val < 0 || val > 255))
+               if (val != -1 && (val < 1 || val > 255))
                        goto e_inval;
                inet->uc_ttl = val;
                break;
index 1ca2536..2aa69c8 100644 (file)
@@ -1428,12 +1428,12 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
 }
 #endif
 
-static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
+static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
        struct sk_buff *skb;
        u32 offset;
 
-       skb_queue_walk(&sk->sk_receive_queue, skb) {
+       while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
                offset = seq - TCP_SKB_CB(skb)->seq;
                if (tcp_hdr(skb)->syn)
                        offset--;
@@ -1441,6 +1441,11 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
                        *off = offset;
                        return skb;
                }
+               /* This looks weird, but this can happen if TCP collapsing
+                * splitted a fat GRO packet, while we released socket lock
+                * in skb_splice_bits()
+                */
+               sk_eat_skb(sk, skb, false);
        }
        return NULL;
 }
@@ -1482,7 +1487,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                        break;
                        }
                        used = recv_actor(desc, skb, offset, len);
-                       if (used < 0) {
+                       if (used <= 0) {
                                if (!copied)
                                        copied = used;
                                break;
@@ -1520,8 +1525,10 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
        tcp_rcv_space_adjust(sk);
 
        /* Clean up data we have read: This will do ACK frames. */
-       if (copied > 0)
+       if (copied > 0) {
+               tcp_recv_skb(sk, seq, &offset);
                tcp_cleanup_rbuf(sk, copied);
+       }
        return copied;
 }
 EXPORT_SYMBOL(tcp_read_sock);
index a28e4db..18f97ca 100644 (file)
@@ -5543,7 +5543,7 @@ slow_path:
        if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
                goto csum_error;
 
-       if (!th->ack)
+       if (!th->ack && !th->rst)
                goto discard;
 
        /*
@@ -5988,7 +5988,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
        }
 
-       if (!th->ack)
+       if (!th->ack && !th->rst)
                goto discard;
 
        if (!tcp_validate_incoming(sk, skb, th, 0))
index 408cac4..420e563 100644 (file)
@@ -154,6 +154,11 @@ static void addrconf_type_change(struct net_device *dev,
                                 unsigned long event);
 static int addrconf_ifdown(struct net_device *dev, int how);
 
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+                                                 int plen,
+                                                 const struct net_device *dev,
+                                                 u32 flags, u32 noflags);
+
 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_timer(unsigned long data);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
@@ -250,12 +255,6 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
        return !qdisc_tx_is_noop(dev);
 }
 
-/* Check if a route is valid prefix route */
-static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
-{
-       return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
-}
-
 static void addrconf_del_timer(struct inet6_ifaddr *ifp)
 {
        if (del_timer(&ifp->timer))
@@ -941,17 +940,15 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
        if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
                struct in6_addr prefix;
                struct rt6_info *rt;
-               struct net *net = dev_net(ifp->idev->dev);
-               struct flowi6 fl6 = {};
 
                ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
-               fl6.flowi6_oif = ifp->idev->dev->ifindex;
-               fl6.daddr = prefix;
-               rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
-                                                        RT6_LOOKUP_F_IFACE);
 
-               if (rt != net->ipv6.ip6_null_entry &&
-                   addrconf_is_prefix_route(rt)) {
+               rt = addrconf_get_prefix_route(&prefix,
+                                              ifp->prefix_len,
+                                              ifp->idev->dev,
+                                              0, RTF_GATEWAY | RTF_DEFAULT);
+
+               if (rt) {
                        if (onlink == 0) {
                                ip6_del_rt(rt);
                                rt = NULL;
@@ -1877,7 +1874,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                        continue;
                if ((rt->rt6i_flags & flags) != flags)
                        continue;
-               if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
+               if ((rt->rt6i_flags & noflags) != 0)
                        continue;
                dst_hold(&rt->dst);
                break;
index 3ad1f9d..df08250 100644 (file)
@@ -1806,7 +1806,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
        struct iucv_irq_data *p;
        struct iucv_irq_list *work;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
+       inc_irq_stat(IRQEXT_IUC);
        p = iucv_irq_data[smp_processor_id()];
        if (p->ippathid >= iucv_max_pathid) {
                WARN_ON(p->ippathid >= iucv_max_pathid);
index 5c61677..47e0aca 100644 (file)
@@ -1009,6 +1009,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        if (old_probe_resp)
                kfree_rcu(old_probe_resp, rcu_head);
 
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               sta_info_flush(local, vlan);
        sta_info_flush(local, sdata);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
index 53f0312..80e5552 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/nl80211.h>
 #include <linux/export.h>
+#include <linux/rtnetlink.h>
 #include <net/cfg80211.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -197,6 +198,15 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *vlan;
+
+               /* for the VLAN list */
+               ASSERT_RTNL();
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       rcu_assign_pointer(vlan->vif.chanctx_conf, NULL);
+       }
+
        ieee80211_unassign_vif_chanctx(sdata, ctx);
        if (ctx->refcount == 0)
                ieee80211_free_chanctx(local, ctx);
@@ -316,6 +326,15 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *vlan;
+
+               /* for the VLAN list */
+               ASSERT_RTNL();
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       rcu_assign_pointer(vlan->vif.chanctx_conf, &ctx->conf);
+       }
+
        ieee80211_recalc_smps_chanctx(local, ctx);
  out:
        mutex_unlock(&local->chanctx_mtx);
@@ -331,6 +350,25 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&sdata->local->chanctx_mtx);
 }
 
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *ap;
+       struct ieee80211_chanctx_conf *conf;
+
+       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
+               return;
+
+       ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
+
+       mutex_lock(&local->chanctx_mtx);
+
+       conf = rcu_dereference_protected(ap->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+       mutex_unlock(&local->chanctx_mtx);
+}
+
 void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
index 8881fc7..6b7644e 100644 (file)
@@ -703,8 +703,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
        sdata_info(sdata,
                   "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
 
-       ieee80211_request_internal_scan(sdata,
-                       ifibss->ssid, ifibss->ssid_len, NULL);
+       ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
+                                   NULL);
 }
 
 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -802,9 +802,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                                        IEEE80211_SCAN_INTERVAL)) {
                sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
-               ieee80211_request_internal_scan(sdata,
-                               ifibss->ssid, ifibss->ssid_len,
-                               ifibss->fixed_channel ? ifibss->channel : NULL);
+               ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+                                           ifibss->ssid_len, chan);
        } else {
                int interval = IEEE80211_SCAN_INTERVAL;
 
index 42d0d02..8563b9a 100644 (file)
@@ -92,8 +92,6 @@ struct ieee80211_bss {
 
        u32 device_ts;
 
-       u8 dtim_period;
-
        bool wmm_used;
        bool uapsd_supported;
 
@@ -140,7 +138,6 @@ enum ieee80211_bss_corrupt_data_flags {
 
 /**
  * enum ieee80211_valid_data_flags - BSS valid data flags
- * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
  * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
  * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
  * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
@@ -151,7 +148,6 @@ enum ieee80211_bss_corrupt_data_flags {
  * beacon/probe response.
  */
 enum ieee80211_bss_valid_data_flags {
-       IEEE80211_BSS_VALID_DTIM                = BIT(0),
        IEEE80211_BSS_VALID_WMM                 = BIT(1),
        IEEE80211_BSS_VALID_RATES               = BIT(2),
        IEEE80211_BSS_VALID_ERP                 = BIT(3)
@@ -440,6 +436,7 @@ struct ieee80211_if_managed {
        unsigned long timers_running; /* used for quiesce/restart */
        bool powersave; /* powersave requested for this iface */
        bool broken_ap; /* AP is broken -- turn off powersave */
+       u8 dtim_period;
        enum ieee80211_smps_mode req_smps, /* requested smps mode */
                                 driver_smps_mode; /* smps mode request */
 
@@ -773,6 +770,10 @@ struct ieee80211_sub_if_data {
                u32 mntr_flags;
        } u;
 
+       spinlock_t cleanup_stations_lock;
+       struct list_head cleanup_stations;
+       struct work_struct cleanup_stations_wk;
+
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct {
                struct dentry *dir;
@@ -1329,9 +1330,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 
 /* scan/BSS handling */
 void ieee80211_scan_work(struct work_struct *work);
-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
-                                   const u8 *ssid, u8 ssid_len,
-                                   struct ieee80211_channel *chan);
+int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+                               const u8 *ssid, u8 ssid_len,
+                               struct ieee80211_channel *chan);
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
                           struct cfg80211_scan_request *req);
 void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1628,6 +1629,7 @@ ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
 void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
index 09a80b5..8be854e 100644 (file)
@@ -207,17 +207,8 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
 
 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
 {
-       int meshhdrlen;
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-       meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
-
-       /* FIX: what would be proper limits for MTU?
-        * This interface uses 802.3 frames. */
-       if (new_mtu < 256 ||
-           new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
+       if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN)
                return -EINVAL;
-       }
 
        dev->mtu = new_mtu;
        return 0;
@@ -586,11 +577,13 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
-               /* no need to tell driver, but set carrier */
-               if (rtnl_dereference(sdata->bss->beacon))
+               /* no need to tell driver, but set carrier and chanctx */
+               if (rtnl_dereference(sdata->bss->beacon)) {
+                       ieee80211_vif_vlan_copy_chanctx(sdata);
                        netif_carrier_on(dev);
-               else
+               } else {
                        netif_carrier_off(dev);
+               }
                break;
        case NL80211_IFTYPE_MONITOR:
                if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -839,6 +832,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
                list_del(&sdata->u.vlan.list);
+               rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -865,20 +859,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                cancel_work_sync(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
-                * Call rcu_barrier() to wait both for the RX path
+                * Call synchronize_rcu() to wait for the RX path
                 * should it be using the interface and enqueuing
-                * frames at this very time on another CPU, and
-                * for the sta free call_rcu callbacks.
-                */
-               rcu_barrier();
-
-               /*
-                * free_sta_rcu() enqueues a work for the actual
-                * sta cleanup, so we need to flush it while
-                * sdata is still valid.
+                * frames at this very time on another CPU.
                 */
-               flush_workqueue(local->workqueue);
-
+               synchronize_rcu();
                skb_queue_purge(&sdata->skb_queue);
 
                /*
@@ -1498,6 +1483,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
        mutex_unlock(&local->iflist_mtx);
 }
 
+static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
+
+       ieee80211_cleanup_sdata_stas(sdata);
+}
+
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
@@ -1573,6 +1567,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
        INIT_LIST_HEAD(&sdata->key_list);
 
+       spin_lock_init(&sdata->cleanup_stations_lock);
+       INIT_LIST_HEAD(&sdata->cleanup_stations);
+       INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
+
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                struct ieee80211_supported_band *sband;
                sband = local->hw.wiphy->bands[i];
index 1bf03f9..649ad51 100644 (file)
@@ -163,7 +163,7 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
                return -ENOMEM;
        sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
        for (i = 0; i < RMC_BUCKETS; i++)
-               INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
+               INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
        return 0;
 }
 
@@ -177,7 +177,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
                return;
 
        for (i = 0; i < RMC_BUCKETS; i++)
-               list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
+               list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
                        list_del(&p->list);
                        kmem_cache_free(rm_cache, p);
                }
@@ -210,7 +210,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
        /* Don't care about endianness since only match matters */
        memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
        idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
-       list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
+       list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
                ++entries;
                if (time_after(jiffies, p->exp_time) ||
                                (entries == RMC_QUEUE_MAX_LEN)) {
@@ -229,7 +229,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
        p->seqnum = seqnum;
        p->exp_time = jiffies + RMC_TIMEOUT;
        memcpy(p->sa, sa, ETH_ALEN);
-       list_add(&p->list, &rmc->bucket[idx].list);
+       list_add(&p->list, &rmc->bucket[idx]);
        return 0;
 }
 
index 7c9215f..84c28c6 100644 (file)
@@ -184,7 +184,7 @@ struct rmc_entry {
 };
 
 struct mesh_rmc {
-       struct rmc_entry bucket[RMC_BUCKETS];
+       struct list_head bucket[RMC_BUCKETS];
        u32 idx_mask;
 };
 
index 7753a9c..a355292 100644 (file)
@@ -1074,12 +1074,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
                if (beaconint_us > latency) {
                        local->ps_sdata = NULL;
                } else {
-                       struct ieee80211_bss *bss;
                        int maxslp = 1;
-                       u8 dtimper;
-
-                       bss = (void *)found->u.mgd.associated->priv;
-                       dtimper = bss->dtim_period;
+                       u8 dtimper = found->u.mgd.dtim_period;
 
                        /* If the TIM IE is invalid, pretend the value is 1 */
                        if (!dtimper)
@@ -1410,10 +1406,17 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_led_assoc(local, 1);
 
-       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
-               bss_conf->dtim_period = bss->dtim_period;
-       else
+       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+               /*
+                * If the AP is buggy we may get here with no DTIM period
+                * known, so assume it's 1 which is the only safe assumption
+                * in that case, although if the TIM IE is broken powersave
+                * probably just won't work at all.
+                */
+               bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
+       } else {
                bss_conf->dtim_period = 0;
+       }
 
        bss_conf->assoc = 1;
 
@@ -1562,6 +1565,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        sdata->u.mgd.timers_running = 0;
 
+       sdata->vif.bss_conf.dtim_period = 0;
+
        ifmgd->flags = 0;
        ieee80211_vif_release_channel(sdata);
 }
@@ -2373,11 +2378,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_channel *channel;
        bool need_ps = false;
 
-       if (sdata->u.mgd.associated &&
-           ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
-               bss = (void *)sdata->u.mgd.associated->priv;
+       if ((sdata->u.mgd.associated &&
+            ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) ||
+           (sdata->u.mgd.assoc_data &&
+            ether_addr_equal(mgmt->bssid,
+                             sdata->u.mgd.assoc_data->bss->bssid))) {
                /* not previously set so we may need to recalc */
-               need_ps = !bss->dtim_period;
+               need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
+
+               if (elems->tim && !elems->parse_error) {
+                       struct ieee80211_tim_ie *tim_ie = elems->tim;
+                       sdata->u.mgd.dtim_period = tim_ie->dtim_period;
+               }
        }
 
        if (elems->ds_params && elems->ds_params_len == 1)
@@ -3896,20 +3908,41 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        /* kick off associate process */
 
        ifmgd->assoc_data = assoc_data;
+       ifmgd->dtim_period = 0;
 
        err = ieee80211_prep_connection(sdata, req->bss, true);
        if (err)
                goto err_clear;
 
-       if (!bss->dtim_period &&
-           sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
-               /*
-                * Wait up to one beacon interval ...
-                * should this be more if we miss one?
-                */
-               sdata_info(sdata, "waiting for beacon from %pM\n",
-                          ifmgd->bssid);
-               assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
+       if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+               const struct cfg80211_bss_ies *beacon_ies;
+
+               rcu_read_lock();
+               beacon_ies = rcu_dereference(req->bss->beacon_ies);
+               if (!beacon_ies) {
+                       /*
+                        * Wait up to one beacon interval ...
+                        * should this be more if we miss one?
+                        */
+                       sdata_info(sdata, "waiting for beacon from %pM\n",
+                                  ifmgd->bssid);
+                       assoc_data->timeout =
+                               TU_TO_EXP_TIME(req->bss->beacon_interval);
+               } else {
+                       const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
+                                                           beacon_ies->data,
+                                                           beacon_ies->len);
+                       if (tim_ie && tim_ie[1] >=
+                                       sizeof(struct ieee80211_tim_ie)) {
+                               const struct ieee80211_tim_ie *tim;
+                               tim = (void *)(tim_ie + 2);
+                               ifmgd->dtim_period = tim->dtim_period;
+                       }
+                       assoc_data->have_beacon = true;
+                       assoc_data->sent_assoc = false;
+                       assoc_data->timeout = jiffies;
+               }
+               rcu_read_unlock();
        } else {
                assoc_data->have_beacon = true;
                assoc_data->sent_assoc = false;
index 8ed83dc..d59fc68 100644 (file)
@@ -113,18 +113,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
                        bss->valid_data |= IEEE80211_BSS_VALID_ERP;
        }
 
-       if (elems->tim && (!elems->parse_error ||
-                          !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
-               struct ieee80211_tim_ie *tim_ie = elems->tim;
-               bss->dtim_period = tim_ie->dtim_period;
-               if (!elems->parse_error)
-                       bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
-       }
-
-       /* If the beacon had no TIM IE, or it was invalid, use 1 */
-       if (beacon && !bss->dtim_period)
-               bss->dtim_period = 1;
-
        /* replace old supported rates if we get new values */
        if (!elems->parse_error ||
            !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
@@ -832,9 +820,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
        return res;
 }
 
-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
-                                   const u8 *ssid, u8 ssid_len,
-                                   struct ieee80211_channel *chan)
+int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+                               const u8 *ssid, u8 ssid_len,
+                               struct ieee80211_channel *chan)
 {
        struct ieee80211_local *local = sdata->local;
        int ret = -EBUSY;
@@ -848,22 +836,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
 
        /* fill internal scan request */
        if (!chan) {
-               int i, nchan = 0;
+               int i, max_n;
+               int n_ch = 0;
 
                for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                        if (!local->hw.wiphy->bands[band])
                                continue;
-                       for (i = 0;
-                            i < local->hw.wiphy->bands[band]->n_channels;
-                            i++) {
-                               local->int_scan_req->channels[nchan] =
+
+                       max_n = local->hw.wiphy->bands[band]->n_channels;
+                       for (i = 0; i < max_n; i++) {
+                               struct ieee80211_channel *tmp_ch =
                                    &local->hw.wiphy->bands[band]->channels[i];
-                               nchan++;
+
+                               if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
+                                                    IEEE80211_CHAN_DISABLED))
+                                       continue;
+
+                               local->int_scan_req->channels[n_ch] = tmp_ch;
+                               n_ch++;
                        }
                }
 
-               local->int_scan_req->n_channels = nchan;
+               if (WARN_ON_ONCE(n_ch == 0))
+                       goto unlock;
+
+               local->int_scan_req->n_channels = n_ch;
        } else {
+               if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
+                                               IEEE80211_CHAN_DISABLED)))
+                       goto unlock;
+
                local->int_scan_req->channels[0] = chan;
                local->int_scan_req->n_channels = 1;
        }
index f3e5025..ca9fde1 100644 (file)
@@ -91,9 +91,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
        return -ENOENT;
 }
 
-static void free_sta_work(struct work_struct *wk)
+static void cleanup_single_sta(struct sta_info *sta)
 {
-       struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
        int ac, i;
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -153,11 +152,35 @@ static void free_sta_work(struct work_struct *wk)
        sta_info_free(local, sta);
 }
 
+void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+
+       spin_lock_bh(&sdata->cleanup_stations_lock);
+       while (!list_empty(&sdata->cleanup_stations)) {
+               sta = list_first_entry(&sdata->cleanup_stations,
+                                      struct sta_info, list);
+               list_del(&sta->list);
+               spin_unlock_bh(&sdata->cleanup_stations_lock);
+
+               cleanup_single_sta(sta);
+
+               spin_lock_bh(&sdata->cleanup_stations_lock);
+       }
+
+       spin_unlock_bh(&sdata->cleanup_stations_lock);
+}
+
 static void free_sta_rcu(struct rcu_head *h)
 {
        struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-       ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
+       spin_lock(&sdata->cleanup_stations_lock);
+       list_add_tail(&sta->list, &sdata->cleanup_stations);
+       spin_unlock(&sdata->cleanup_stations_lock);
+
+       ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
 }
 
 /* protected by RCU */
@@ -310,7 +333,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_init(&sta->lock);
        INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
-       INIT_WORK(&sta->free_sta_wk, free_sta_work);
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 
@@ -862,7 +884,7 @@ void sta_info_init(struct ieee80211_local *local)
 
 void sta_info_stop(struct ieee80211_local *local)
 {
-       del_timer(&local->sta_cleanup);
+       del_timer_sync(&local->sta_cleanup);
        sta_info_flush(local, NULL);
 }
 
@@ -891,6 +913,20 @@ int sta_info_flush(struct ieee80211_local *local,
        }
        mutex_unlock(&local->sta_mtx);
 
+       rcu_barrier();
+
+       if (sdata) {
+               ieee80211_cleanup_sdata_stas(sdata);
+               cancel_work_sync(&sdata->cleanup_stations_wk);
+       } else {
+               mutex_lock(&local->iflist_mtx);
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       ieee80211_cleanup_sdata_stas(sdata);
+                       cancel_work_sync(&sdata->cleanup_stations_wk);
+               }
+               mutex_unlock(&local->iflist_mtx);
+       }
+
        return ret;
 }
 
index 1489bca..37c1889 100644 (file)
@@ -299,7 +299,6 @@ struct sta_info {
        spinlock_t lock;
 
        struct work_struct drv_unblock_wk;
-       struct work_struct free_sta_wk;
 
        u16 listen_interval;
 
@@ -563,4 +562,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
 
+void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
+
 #endif /* STA_INFO_H */
index 1915ffe..507b5e8 100644 (file)
@@ -555,7 +555,7 @@ EXPORT_SYMBOL_GPL(rpc_clone_client);
  * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
  *
  * @clnt: RPC client whose parameters are copied
- * @auth: security flavor for new client
+ * @flavor: security flavor for new client
  *
  * Returns a fresh RPC client or an ERR_PTR.
  */
index b4133bd..bfa3171 100644 (file)
@@ -972,8 +972,7 @@ static void rpc_async_release(struct work_struct *work)
 
 static void rpc_release_resources_task(struct rpc_task *task)
 {
-       if (task->tk_rqstp)
-               xprt_release(task);
+       xprt_release(task);
        if (task->tk_msg.rpc_cred) {
                put_rpccred(task->tk_msg.rpc_cred);
                task->tk_msg.rpc_cred = NULL;
index bd462a5..33811db 100644 (file)
@@ -1136,10 +1136,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 void xprt_release(struct rpc_task *task)
 {
        struct rpc_xprt *xprt;
-       struct rpc_rqst *req;
+       struct rpc_rqst *req = task->tk_rqstp;
 
-       if (!(req = task->tk_rqstp))
+       if (req == NULL) {
+               if (task->tk_client) {
+                       rcu_read_lock();
+                       xprt = rcu_dereference(task->tk_client->cl_xprt);
+                       if (xprt->snd_task == task)
+                               xprt_release_write(xprt, task);
+                       rcu_read_unlock();
+               }
                return;
+       }
 
        xprt = req->rq_xprt;
        if (task->tk_ops->rpc_count_stats != NULL)
index 14d9904..b677eab 100644 (file)
@@ -866,8 +866,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                /* allow mac80211 to determine the timeout */
                wdev->ps_timeout = -1;
 
-               if (!dev->ethtool_ops)
-                       dev->ethtool_ops = &cfg80211_ethtool_ops;
+               netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
 
                if ((wdev->iftype == NL80211_IFTYPE_STATION ||
                     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
index 19ecc8d..d794abc 100644 (file)
@@ -215,7 +215,9 @@ static void devcgroup_css_free(struct cgroup *cgroup)
        struct dev_cgroup *dev_cgroup;
 
        dev_cgroup = cgroup_to_devcgroup(cgroup);
+       mutex_lock(&devcgroup_mutex);
        dev_exception_clean(dev_cgroup);
+       mutex_unlock(&devcgroup_mutex);
        kfree(dev_cgroup);
 }
 
index dfb2691..7dd538e 100644 (file)
@@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
                rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
                                           &xattr_data,
                                           sizeof(xattr_data), 0);
-       }
-       else if (rc == -ENODATA)
+       } else if (rc == -ENODATA && inode->i_op->removexattr) {
                rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+       }
        return rc;
 }
 
index 6fc0ae9..fff7753 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 
 #include <sound/ac97_codec.h>
 #include <sound/pxa2xx-lib.h>
@@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
 
 static inline void pxa_ac97_cold_pxa27x(void)
 {
+       unsigned int timeout;
+
        GCR &=  GCR_COLD_RST;  /* clear everything but nCRST */
        GCR &= ~GCR_COLD_RST;  /* then assert nCRST */
 
@@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
        clk_enable(ac97conf_clk);
        udelay(5);
        clk_disable(ac97conf_clk);
-       GCR = GCR_COLD_RST;
-       udelay(50);
+       GCR = GCR_COLD_RST | GCR_WARM_RST;
+       timeout = 100;     /* wait for the codec-ready bit to be set */
+       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+               mdelay(1);
 }
 #endif
 
@@ -340,8 +345,21 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev)
        }
 
        if (cpu_is_pxa27x()) {
-               /* Use GPIO 113 as AC97 Reset on Bulverde */
+               /*
+                * This gpio is needed for a work-around to a bug in the ac97
+                * controller during warm reset.  The direction and level is set
+                * here so that it is an output driven high when switching from
+                * AC97_nRESET alt function to generic gpio.
+                */
+               ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
+                                      "pxa27x ac97 reset");
+               if (ret < 0) {
+                       pr_err("%s: gpio_request_one() failed: %d\n",
+                              __func__, ret);
+                       goto err_conf;
+               }
                pxa27x_assert_ac97reset(reset_gpio, 0);
+
                ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
                if (IS_ERR(ac97conf_clk)) {
                        ret = PTR_ERR(ac97conf_clk);
@@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
 
 void pxa2xx_ac97_hw_remove(struct platform_device *dev)
 {
+       if (cpu_is_pxa27x())
+               gpio_free(reset_gpio);
        GCR |= GCR_ACLINK_OFF;
        free_irq(IRQ_AC97, NULL);
        if (ac97conf_clk) {
index b8fb0a5..822df97 100644 (file)
@@ -3654,6 +3654,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
        hda_set_power_state(codec, AC_PWRST_D0);
        restore_shutup_pins(codec);
        hda_exec_init_verbs(codec);
+       snd_hda_jack_set_dirty_all(codec);
        if (codec->patch_ops.resume)
                codec->patch_ops.resume(codec);
        else {
@@ -3665,10 +3666,8 @@ static void hda_call_codec_resume(struct hda_codec *codec)
 
        if (codec->jackpoll_interval)
                hda_jackpoll_work(&codec->jackpoll_work.work);
-       else {
-               snd_hda_jack_set_dirty_all(codec);
+       else
                snd_hda_jack_report_sync(codec);
-       }
 
        codec->in_pm = 0;
        snd_hda_power_down(codec); /* flag down before returning */
index cca8727..0b6aeba 100644 (file)
@@ -573,9 +573,12 @@ enum {
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 
 /* quirks for Intel PCH */
-#define AZX_DCAPS_INTEL_PCH \
+#define AZX_DCAPS_INTEL_PCH_NOPM \
        (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
-        AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
+        AZX_DCAPS_COUNT_LPIB_DELAY)
+
+#define AZX_DCAPS_INTEL_PCH \
+       (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
 
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
@@ -3586,13 +3589,13 @@ static void azx_remove(struct pci_dev *pci)
 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* CPT */
        { PCI_DEVICE(0x8086, 0x1c20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* PBG */
        { PCI_DEVICE(0x8086, 0x1d20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Panther Point */
        { PCI_DEVICE(0x8086, 0x1e20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
index 60890bf..009b77a 100644 (file)
@@ -558,24 +558,12 @@ static int conexant_build_controls(struct hda_codec *codec)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int conexant_suspend(struct hda_codec *codec)
-{
-       snd_hda_shutup_pins(codec);
-       return 0;
-}
-#endif
-
 static const struct hda_codec_ops conexant_patch_ops = {
        .build_controls = conexant_build_controls,
        .build_pcms = conexant_build_pcms,
        .init = conexant_init,
        .free = conexant_free,
        .set_power_state = conexant_set_power,
-#ifdef CONFIG_PM
-       .suspend = conexant_suspend,
-#endif
-       .reboot_notify = snd_hda_shutup_pins,
 };
 
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
@@ -4405,10 +4393,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
        .init = cx_auto_init,
        .free = conexant_free,
        .unsol_event = snd_hda_jack_unsol_event,
-#ifdef CONFIG_PM
-       .suspend = conexant_suspend,
-#endif
-       .reboot_notify = snd_hda_shutup_pins,
 };
 
 /*
@@ -4652,6 +4636,12 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_conexant_auto },
        { .id = 0x14f15111, .name = "CX20753/4",
          .patch = patch_conexant_auto },
+       { .id = 0x14f15113, .name = "CX20755",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f15114, .name = "CX20756",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f15115, .name = "CX20757",
+         .patch = patch_conexant_auto },
        {} /* terminator */
 };
 
@@ -4675,6 +4665,9 @@ MODULE_ALIAS("snd-hda-codec-id:14f150b9");
 MODULE_ALIAS("snd-hda-codec-id:14f1510f");
 MODULE_ALIAS("snd-hda-codec-id:14f15110");
 MODULE_ALIAS("snd-hda-codec-id:14f15111");
+MODULE_ALIAS("snd-hda-codec-id:14f15113");
+MODULE_ALIAS("snd-hda-codec-id:14f15114");
+MODULE_ALIAS("snd-hda-codec-id:14f15115");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Conexant HD-audio codec");
index b6c21ea..807a2aa 100644 (file)
@@ -1502,7 +1502,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
        ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
        substream = snd_pcm_chmap_substream(info, ctl_idx);
        if (!substream || !substream->runtime)
-               return -EBADFD;
+               return 0; /* just for avoiding error from alsactl restore */
        switch (substream->runtime->status->state) {
        case SNDRV_PCM_STATE_OPEN:
        case SNDRV_PCM_STATE_SETUP:
index 71ae23d..cf38861 100644 (file)
@@ -5817,6 +5817,9 @@ enum {
        ALC269_TYPE_ALC269VB,
        ALC269_TYPE_ALC269VC,
        ALC269_TYPE_ALC269VD,
+       ALC269_TYPE_ALC280,
+       ALC269_TYPE_ALC282,
+       ALC269_TYPE_ALC284,
 };
 
 /*
@@ -5833,10 +5836,13 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        switch (spec->codec_variant) {
        case ALC269_TYPE_ALC269VA:
        case ALC269_TYPE_ALC269VC:
+       case ALC269_TYPE_ALC280:
+       case ALC269_TYPE_ALC284:
                ssids = alc269va_ssids;
                break;
        case ALC269_TYPE_ALC269VB:
        case ALC269_TYPE_ALC269VD:
+       case ALC269_TYPE_ALC282:
                ssids = alc269_ssids;
                break;
        default:
@@ -6245,6 +6251,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -6259,6 +6266,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+       SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
@@ -6400,7 +6408,8 @@ static int patch_alc269(struct hda_codec *codec)
 
        alc_auto_parse_customize_define(codec);
 
-       if (codec->vendor_id == 0x10ec0269) {
+       switch (codec->vendor_id) {
+       case 0x10ec0269:
                spec->codec_variant = ALC269_TYPE_ALC269VA;
                switch (alc_get_coef0(codec) & 0x00f0) {
                case 0x0010:
@@ -6425,6 +6434,20 @@ static int patch_alc269(struct hda_codec *codec)
                        goto error;
                spec->init_hook = alc269_fill_coef;
                alc269_fill_coef(codec);
+               break;
+
+       case 0x10ec0280:
+       case 0x10ec0290:
+               spec->codec_variant = ALC269_TYPE_ALC280;
+               break;
+       case 0x10ec0282:
+       case 0x10ec0283:
+               spec->codec_variant = ALC269_TYPE_ALC282;
+               break;
+       case 0x10ec0284:
+       case 0x10ec0292:
+               spec->codec_variant = ALC269_TYPE_ALC284;
+               break;
        }
 
        /* automatic parse from the BIOS config */
@@ -7129,6 +7152,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
        { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
        { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
+       { .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
        { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
        { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
index 6e02e06..223c3d9 100644 (file)
@@ -441,6 +441,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 */
 /* status */
 #define HDSPM_AES32_wcLock     0x0200000
+#define HDSPM_AES32_wcSync     0x0100000
 #define HDSPM_AES32_wcFreq_bit  22
 /* (status >> HDSPM_AES32_wcFreq_bit) & 0xF gives WC frequency (cf function
   HDSPM_bit2freq */
@@ -3467,10 +3468,12 @@ static int hdspm_wc_sync_check(struct hdspm *hdspm)
        switch (hdspm->io_type) {
        case AES32:
                status = hdspm_read(hdspm, HDSPM_statusRegister);
-               if (status & HDSPM_wcSync)
-                       return 2;
-               else if (status & HDSPM_wcLock)
-                       return 1;
+               if (status & HDSPM_AES32_wcLock) {
+                       if (status & HDSPM_AES32_wcSync)
+                               return 2;
+                       else
+                               return 1;
+               }
                return 0;
                break;
 
@@ -4658,6 +4661,7 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
        unsigned int status;
        unsigned int status2;
        unsigned int timecode;
+       unsigned int wcLock, wcSync;
        int pref_syncref;
        char *autosync_ref;
        int x;
@@ -4751,8 +4755,11 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
 
        snd_iprintf(buffer, "--- Status:\n");
 
+       wcLock = status & HDSPM_AES32_wcLock;
+       wcSync = wcLock && (status & HDSPM_AES32_wcSync);
+
        snd_iprintf(buffer, "Word: %s  Frequency: %d\n",
-                   (status & HDSPM_AES32_wcLock) ? "Sync   " : "No Lock",
+                   (wcLock) ? (wcSync ? "Sync   " : "Lock   ") : "No Lock",
                    HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF));
 
        for (x = 0; x < 8; x++) {
index adf397b..1d8bb59 100644 (file)
@@ -446,15 +446,9 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_DSP_A:
                mode = 0;
                break;
-       case SND_SOC_DAIFMT_DSP_B:
-               mode = 1;
-               break;
        case SND_SOC_DAIFMT_I2S:
                mode = 2;
                break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               mode = 3;
-               break;
        default:
                arizona_aif_err(dai, "Unsupported DAI format %d\n",
                                fmt & SND_SOC_DAIFMT_FORMAT_MASK);
@@ -714,7 +708,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
                snd_soc_update_bits(codec, ARIZONA_ASYNC_SAMPLE_RATE_1,
                                    ARIZONA_ASYNC_SAMPLE_RATE_MASK, sr_val);
                snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
-                                   ARIZONA_AIF1_RATE_MASK, 8);
+                                   ARIZONA_AIF1_RATE_MASK,
+                                   8 << ARIZONA_AIF1_RATE_SHIFT);
                break;
        default:
                arizona_aif_err(dai, "Invalid clock %d\n", dai_priv->clk);
index 41dae1e..4deebeb 100644 (file)
 
 #define ARIZONA_FLL_SRC_MCLK1      0
 #define ARIZONA_FLL_SRC_MCLK2      1
-#define ARIZONA_FLL_SRC_SLIMCLK    2
-#define ARIZONA_FLL_SRC_FLL1       3
-#define ARIZONA_FLL_SRC_FLL2       4
-#define ARIZONA_FLL_SRC_AIF1BCLK   5
-#define ARIZONA_FLL_SRC_AIF2BCLK   6
-#define ARIZONA_FLL_SRC_AIF3BCLK   7
-#define ARIZONA_FLL_SRC_AIF1LRCLK  8
-#define ARIZONA_FLL_SRC_AIF2LRCLK  9
-#define ARIZONA_FLL_SRC_AIF3LRCLK 10
+#define ARIZONA_FLL_SRC_SLIMCLK    3
+#define ARIZONA_FLL_SRC_FLL1       4
+#define ARIZONA_FLL_SRC_FLL2       5
+#define ARIZONA_FLL_SRC_AIF1BCLK   8
+#define ARIZONA_FLL_SRC_AIF2BCLK   9
+#define ARIZONA_FLL_SRC_AIF3BCLK  10
+#define ARIZONA_FLL_SRC_AIF1LRCLK 12
+#define ARIZONA_FLL_SRC_AIF2LRCLK 13
+#define ARIZONA_FLL_SRC_AIF3LRCLK 14
 
 #define ARIZONA_MIXER_VOL_MASK             0x00FE
 #define ARIZONA_MIXER_VOL_SHIFT                 1
index 4f11279..ac8742a 100644 (file)
@@ -474,16 +474,16 @@ static int cs4271_probe(struct snd_soc_codec *codec)
        struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
        int ret;
        int gpio_nreset = -EINVAL;
-       int amutec_eq_bmutec = 0;
+       bool amutec_eq_bmutec = false;
 
 #ifdef CONFIG_OF
        if (of_match_device(cs4271_dt_ids, codec->dev)) {
                gpio_nreset = of_get_named_gpio(codec->dev->of_node,
                                                "reset-gpio", 0);
 
-               if (!of_get_property(codec->dev->of_node,
+               if (of_get_property(codec->dev->of_node,
                                     "cirrus,amutec-eq-bmutec", NULL))
-                       amutec_eq_bmutec = 1;
+                       amutec_eq_bmutec = true;
        }
 #endif
 
index 99bb1c6..9811a54 100644 (file)
@@ -737,7 +737,7 @@ static const struct cs42l52_clk_para clk_map_table[] = {
 
 static int cs42l52_get_clk(int mclk, int rate)
 {
-       int i, ret = 0;
+       int i, ret = -EINVAL;
        u_int mclk1, mclk2 = 0;
 
        for (i = 0; i < ARRAY_SIZE(clk_map_table); i++) {
@@ -749,8 +749,6 @@ static int cs42l52_get_clk(int mclk, int rate)
                        }
                }
        }
-       if (ret > ARRAY_SIZE(clk_map_table))
-               return -EINVAL;
        return ret;
 }
 
index d75257d..e19490c 100644 (file)
@@ -111,9 +111,9 @@ static struct reg_default lm49453_reg_defs[] = {
        { 101, 0x00 },
        { 102, 0x00 },
        { 103, 0x01 },
-       { 105, 0x01 },
-       { 106, 0x00 },
-       { 107, 0x01 },
+       { 104, 0x01 },
+       { 105, 0x00 },
+       { 106, 0x01 },
        { 107, 0x00 },
        { 108, 0x00 },
        { 109, 0x00 },
@@ -163,56 +163,25 @@ static struct reg_default lm49453_reg_defs[] = {
        { 184, 0x00 },
        { 185, 0x00 },
        { 186, 0x00 },
-       { 189, 0x00 },
+       { 187, 0x00 },
        { 188, 0x00 },
-       { 194, 0x00 },
-       { 195, 0x00 },
-       { 196, 0x00 },
-       { 197, 0x00 },
-       { 200, 0x00 },
-       { 201, 0x00 },
-       { 202, 0x00 },
-       { 203, 0x00 },
-       { 204, 0x00 },
-       { 205, 0x00 },
-       { 208, 0x00 },
+       { 189, 0x00 },
+       { 208, 0x06 },
        { 209, 0x00 },
-       { 210, 0x00 },
-       { 211, 0x00 },
-       { 213, 0x00 },
-       { 214, 0x00 },
-       { 215, 0x00 },
-       { 216, 0x00 },
-       { 217, 0x00 },
-       { 218, 0x00 },
-       { 219, 0x00 },
+       { 210, 0x08 },
+       { 211, 0x54 },
+       { 212, 0x14 },
+       { 213, 0x0d },
+       { 214, 0x0d },
+       { 215, 0x14 },
+       { 216, 0x60 },
        { 221, 0x00 },
        { 222, 0x00 },
+       { 223, 0x00 },
        { 224, 0x00 },
-       { 225, 0x00 },
-       { 226, 0x00 },
-       { 227, 0x00 },
-       { 228, 0x00 },
-       { 229, 0x00 },
-       { 230, 0x13 },
-       { 231, 0x00 },
-       { 232, 0x80 },
-       { 233, 0x0C },
-       { 234, 0xDD },
-       { 235, 0x00 },
-       { 236, 0x04 },
-       { 237, 0x00 },
-       { 238, 0x00 },
-       { 239, 0x00 },
-       { 240, 0x00 },
-       { 241, 0x00 },
-       { 242, 0x00 },
-       { 243, 0x00 },
-       { 244, 0x00 },
-       { 245, 0x00 },
        { 248, 0x00 },
        { 249, 0x00 },
-       { 254, 0x00 },
+       { 250, 0x00 },
        { 255, 0x00 },
 };
 
@@ -525,36 +494,41 @@ SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_PORT2_TX2_REG, 7, 1, 0),
 };
 
 /* TLV Declarations */
-static const DECLARE_TLV_DB_SCALE(digital_tlv, -7650, 150, 1);
-static const DECLARE_TLV_DB_SCALE(port_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adc_dac_tlv, -7650, 150, 1);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 200, 1);
+static const DECLARE_TLV_DB_SCALE(port_tlv, -1800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(stn_tlv, -7200, 150, 0);
 
 static const struct snd_kcontrol_new lm49453_sidetone_mixer_controls[] = {
 /* Sidetone supports mono only */
 SOC_DAPM_SINGLE_TLV("Sidetone ADCL Volume", LM49453_P0_STN_VOL_ADCL_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone ADCR Volume", LM49453_P0_STN_VOL_ADCR_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC1L Volume", LM49453_P0_STN_VOL_DMIC1L_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC1R Volume", LM49453_P0_STN_VOL_DMIC1R_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC2L Volume", LM49453_P0_STN_VOL_DMIC2L_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC2R Volume", LM49453_P0_STN_VOL_DMIC2R_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 };
 
 static const struct snd_kcontrol_new lm49453_snd_controls[] = {
        /* mic1 and mic2 supports mono only */
-       SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_ADC_LEVELL_REG, 0, 6,
-                       0, digital_tlv),
-       SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_ADC_LEVELR_REG, 0, 6,
-                       0, digital_tlv),
+       SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_MICL_REG, 0, 15, 0, mic_tlv),
+       SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_MICR_REG, 0, 15, 0, mic_tlv),
+
+       SOC_SINGLE_TLV("ADCL Volume", LM49453_P0_ADC_LEVELL_REG, 0, 63,
+                       0, adc_dac_tlv),
+       SOC_SINGLE_TLV("ADCR Volume", LM49453_P0_ADC_LEVELR_REG, 0, 63,
+                       0, adc_dac_tlv),
 
        SOC_DOUBLE_R_TLV("DMIC1 Volume", LM49453_P0_DMIC1_LEVELL_REG,
-                         LM49453_P0_DMIC1_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DMIC1_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DMIC2 Volume", LM49453_P0_DMIC2_LEVELL_REG,
-                         LM49453_P0_DMIC2_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DMIC2_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
 
        SOC_DAPM_ENUM("Mic2Mode", lm49453_mic2mode_enum),
        SOC_DAPM_ENUM("DMIC12 SRC", lm49453_dmic12_cfg_enum),
@@ -569,16 +543,16 @@ static const struct snd_kcontrol_new lm49453_snd_controls[] = {
                                          2, 1, 0),
 
        SOC_DOUBLE_R_TLV("DAC HP Volume", LM49453_P0_DAC_HP_LEVELL_REG,
-                         LM49453_P0_DAC_HP_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_HP_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DAC LO Volume", LM49453_P0_DAC_LO_LEVELL_REG,
-                         LM49453_P0_DAC_LO_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_LO_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DAC LS Volume", LM49453_P0_DAC_LS_LEVELL_REG,
-                         LM49453_P0_DAC_LS_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_LS_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DAC HA Volume", LM49453_P0_DAC_HA_LEVELL_REG,
-                         LM49453_P0_DAC_HA_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_HA_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
 
        SOC_SINGLE_TLV("EP Volume", LM49453_P0_DAC_LS_LEVELL_REG,
-                       0, 6, 0, digital_tlv),
+                       0, 63, 0, adc_dac_tlv),
 
        SOC_SINGLE_TLV("PORT1_1_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
                        0, 3, 0, port_tlv),
@@ -1218,7 +1192,7 @@ static int lm49453_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        }
 
        snd_soc_update_bits(codec, LM49453_P0_AUDIO_PORT1_BASIC_REG,
-                           LM49453_AUDIO_PORT1_BASIC_FMT_MASK|BIT(1)|BIT(5),
+                           LM49453_AUDIO_PORT1_BASIC_FMT_MASK|BIT(0)|BIT(5),
                            (aif_val | mode | clk_phase));
 
        snd_soc_write(codec, LM49453_P0_AUDIO_PORT1_RX_MSB_REG, clk_shift);
index cb1675c..92bbfec 100644 (file)
@@ -401,7 +401,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
                        5, 1, 0),
 
        SOC_SINGLE_TLV("Mic Volume", SGTL5000_CHIP_MIC_CTRL,
-                       0, 4, 0, mic_gain_tlv),
+                       0, 3, 0, mic_gain_tlv),
 };
 
 /* mute the codec used by alsa core */
@@ -1344,7 +1344,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
                        SGTL5000_HP_ZCD_EN |
                        SGTL5000_ADC_ZCD_EN);
 
-       snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 0);
+       snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 2);
 
        /*
         * disable DAP
index ab355c4..40c07be 100644 (file)
                                SNDRV_PCM_FMTBIT_S32_LE)
 #define        S2PC_VALUE              0x98
 #define CLOCK_OUT              0x60
-#define LEFT_J_DATA_FORMAT     0x10
-#define I2S_DATA_FORMAT                0x12
-#define RIGHT_J_DATA_FORMAT    0x14
+#define DATA_FORMAT_MSK                0x0E
+#define LEFT_J_DATA_FORMAT     0x00
+#define I2S_DATA_FORMAT                0x02
+#define RIGHT_J_DATA_FORMAT    0x04
 #define CODEC_MUTE_VAL         0x80
 
 #define POWER_CNTLMSAK         0x40
@@ -289,7 +290,7 @@ static int sta529_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, STA529_S2PCFG0, 0x0D, mode);
+       snd_soc_update_bits(codec, STA529_S2PCFG0, DATA_FORMAT_MSK, mode);
 
        return 0;
 }
index 1cbe88f..12bcae6 100644 (file)
@@ -209,9 +209,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 
        ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
        if (wm2000->speech_clarity)
-               ret &= ~WM2000_SPEECH_CLARITY;
-       else
                ret |= WM2000_SPEECH_CLARITY;
+       else
+               ret &= ~WM2000_SPEECH_CLARITY;
        wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
 
        wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
index afcf31d..e6cefe1 100644 (file)
@@ -1566,15 +1566,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_DSP_A:
                fmt_val = 0;
                break;
-       case SND_SOC_DAIFMT_DSP_B:
-               fmt_val = 1;
-               break;
        case SND_SOC_DAIFMT_I2S:
                fmt_val = 2;
                break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               fmt_val = 3;
-               break;
        default:
                dev_err(codec->dev, "Unsupported DAI format %d\n",
                        fmt & SND_SOC_DAIFMT_FORMAT_MASK);
@@ -1626,7 +1620,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                            WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
                            lrclk);
        snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
-                           WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
+                           WM2200_AIF1_FMT_MASK, fmt_val);
 
        return 0;
 }
index 5a5f369..54397a5 100644 (file)
@@ -1279,15 +1279,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_DSP_A:
                mask = 0;
                break;
-       case SND_SOC_DAIFMT_DSP_B:
-               mask = 1;
-               break;
        case SND_SOC_DAIFMT_I2S:
                mask = 2;
                break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               mask = 3;
-               break;
        default:
                dev_err(codec->dev, "Unsupported DAI format %d\n",
                        fmt & SND_SOC_DAIFMT_FORMAT_MASK);
index 688ade0..7a9048d 100644 (file)
@@ -36,6 +36,9 @@
 struct wm5102_priv {
        struct arizona_priv core;
        struct arizona_fll fll[2];
+
+       unsigned int spk_ena:2;
+       unsigned int spk_ena_pending:1;
 };
 
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
@@ -787,6 +790,47 @@ ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
 };
 
+static int wm5102_spk_ev(struct snd_soc_dapm_widget *w,
+                        struct snd_kcontrol *kcontrol,
+                        int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+       struct wm5102_priv *wm5102 = snd_soc_codec_get_drvdata(codec);
+
+       if (arizona->rev < 1)
+               return 0;
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               if (!wm5102->spk_ena) {
+                       snd_soc_write(codec, 0x4f5, 0x25a);
+                       wm5102->spk_ena_pending = true;
+               }
+               break;
+       case SND_SOC_DAPM_POST_PMU:
+               if (wm5102->spk_ena_pending) {
+                       msleep(75);
+                       snd_soc_write(codec, 0x4f5, 0xda);
+                       wm5102->spk_ena_pending = false;
+                       wm5102->spk_ena++;
+               }
+               break;
+       case SND_SOC_DAPM_PRE_PMD:
+               wm5102->spk_ena--;
+               if (!wm5102->spk_ena)
+                       snd_soc_write(codec, 0x4f5, 0x25a);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               if (!wm5102->spk_ena)
+                       snd_soc_write(codec, 0x4f5, 0x0da);
+               break;
+       }
+
+       return 0;
+}
+
+
 ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
@@ -1034,10 +1078,10 @@ SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
                   ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT4L", ARIZONA_OUTPUT_ENABLES_1,
-                  ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, wm5102_spk_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT4R", ARIZONA_OUTPUT_ENABLES_1,
-                  ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, wm5102_spk_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
                   ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
index ffc89fa..7b198c3 100644 (file)
@@ -169,6 +169,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
        const struct wm_adsp_region *mem;
        const char *region_name;
        char *file, *text;
+       void *buf;
        unsigned int reg;
        int regions = 0;
        int ret, offset, type, sizes;
@@ -322,8 +323,18 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                }
 
                if (reg) {
-                       ret = regmap_raw_write(regmap, reg, region->data,
+                       buf = kmemdup(region->data, le32_to_cpu(region->len),
+                                     GFP_KERNEL);
+                       if (!buf) {
+                               adsp_err(dsp, "Out of memory\n");
+                               return -ENOMEM;
+                       }
+
+                       ret = regmap_raw_write(regmap, reg, buf,
                                               le32_to_cpu(region->len));
+
+                       kfree(buf);
+
                        if (ret != 0) {
                                adsp_err(dsp,
                                        "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -359,6 +370,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
        const char *region_name;
        int ret, pos, blocks, type, offset, reg;
        char *file;
+       void *buf;
 
        file = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (file == NULL)
@@ -426,6 +438,13 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                }
 
                if (reg) {
+                       buf = kmemdup(blk->data, le32_to_cpu(blk->len),
+                                     GFP_KERNEL);
+                       if (!buf) {
+                               adsp_err(dsp, "Out of memory\n");
+                               return -ENOMEM;
+                       }
+
                        ret = regmap_raw_write(regmap, reg, blk->data,
                                               le32_to_cpu(blk->len));
                        if (ret != 0) {
@@ -433,6 +452,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                                        "%s.%d: Failed to write to %x in %s\n",
                                        file, blocks, reg, region_name);
                        }
+
+                       kfree(buf);
                }
 
                pos += le32_to_cpu(blk->len) + sizeof(*blk);
index 91d592f..2370063 100644 (file)
@@ -1255,6 +1255,8 @@ static int soc_post_component_init(struct snd_soc_card *card,
        INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
        ret = device_add(rtd->dev);
        if (ret < 0) {
+               /* calling put_device() here to free the rtd->dev */
+               put_device(rtd->dev);
                dev_err(card->dev,
                        "ASoC: failed to register runtime device: %d\n", ret);
                return ret;
@@ -1554,7 +1556,7 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
        /* unregister the rtd device */
        if (rtd->dev_registered) {
                device_remove_file(rtd->dev, &dev_attr_codec_reg);
-               device_del(rtd->dev);
+               device_unregister(rtd->dev);
                rtd->dev_registered = 0;
        }
 
@@ -2917,7 +2919,7 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
        platform_max = mc->platform_max;
 
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-       uinfo->count = 1;
+       uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
        uinfo->value.integer.min = 0;
        uinfo->value.integer.max = platform_max - min;
 
@@ -2941,12 +2943,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
        unsigned int reg = mc->reg;
+       unsigned int rreg = mc->rreg;
        unsigned int shift = mc->shift;
        int min = mc->min;
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
        unsigned int val, val_mask;
+       int ret;
 
        val = ((ucontrol->value.integer.value[0] + min) & mask);
        if (invert)
@@ -2954,7 +2958,21 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
        val_mask = mask << shift;
        val = val << shift;
 
-       return snd_soc_update_bits_locked(codec, reg, val_mask, val);
+       ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+       if (ret != 0)
+               return ret;
+
+       if (snd_soc_volsw_is_stereo(mc)) {
+               val = ((ucontrol->value.integer.value[1] + min) & mask);
+               if (invert)
+                       val = max - val;
+               val_mask = mask << shift;
+               val = val << shift;
+
+               ret = snd_soc_update_bits_locked(codec, rreg, val_mask, val);
+       }
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
 
@@ -2974,6 +2992,7 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
        unsigned int reg = mc->reg;
+       unsigned int rreg = mc->rreg;
        unsigned int shift = mc->shift;
        int min = mc->min;
        int max = mc->max;
@@ -2988,6 +3007,16 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
        ucontrol->value.integer.value[0] =
                ucontrol->value.integer.value[0] - min;
 
+       if (snd_soc_volsw_is_stereo(mc)) {
+               ucontrol->value.integer.value[1] =
+                       (snd_soc_read(codec, rreg) >> shift) & mask;
+               if (invert)
+                       ucontrol->value.integer.value[1] =
+                               max - ucontrol->value.integer.value[1];
+               ucontrol->value.integer.value[1] =
+                       ucontrol->value.integer.value[1] - min;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
index d7711fc..cf191e6 100644 (file)
@@ -1243,6 +1243,7 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
                if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
                        continue;
 
index e71fe55..0e2ed3d 100644 (file)
@@ -179,6 +179,15 @@ static struct usbmix_name_map audigy2nx_map[] = {
        { 0 } /* terminator */
 };
 
+static struct usbmix_selector_map c400_selectors[] = {
+       {
+               .id = 0x80,
+               .count = 2,
+               .names = (const char*[]) {"Internal", "SPDIF"}
+       },
+       { 0 } /* terminator */
+};
+
 static struct usbmix_selector_map audigy2nx_selectors[] = {
        {
                .id = 14, /* Capture Source */
@@ -367,6 +376,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = hercules_usb51_map,
        },
        {
+               .id = USB_ID(0x0763, 0x2030),
+               .selector_map = c400_selectors,
+       },
+       {
                .id = USB_ID(0x08bb, 0x2702),
                .map = linex_map,
                .ignore_ctl_error = 1,
index 0422b13..15520de 100644 (file)
@@ -1206,7 +1206,7 @@ static int snd_c400_create_mixer(struct usb_mixer_interface *mixer)
  * are valid they presents mono controls as L and R channels of
  * stereo. So we provide a good mixer here.
  */
-struct std_mono_table ebox44_table[] = {
+static struct std_mono_table ebox44_table[] = {
        {
                .unitid = 4,
                .control = 1,
index c659310..d82e378 100644 (file)
@@ -511,6 +511,16 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
        struct snd_usb_substream *sync_subs =
                &subs->stream->substream[subs->direction ^ 1];
 
+       if (subs->sync_endpoint->type != SND_USB_ENDPOINT_TYPE_DATA ||
+           !subs->stream)
+               return snd_usb_endpoint_set_params(subs->sync_endpoint,
+                                                  subs->pcm_format,
+                                                  subs->channels,
+                                                  subs->period_bytes,
+                                                  subs->cur_rate,
+                                                  subs->cur_audiofmt,
+                                                  NULL);
+
        /* Try to find the best matching audioformat. */
        list_for_each_entry(fp, &sync_subs->fmt_list, list) {
                int score = match_endpoint_audioformats(fp, subs->cur_audiofmt,
index 78e845e..64d25a7 100644 (file)
@@ -2289,7 +2289,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                                        .rate_table = (unsigned int[]) {
                                                        44100, 48000, 88200, 96000
                                        },
-                                       .clock = 0x81,
+                                       .clock = 0x80,
                                }
                        },
                        /* Capture */
@@ -2315,7 +2315,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                                        .rate_table = (unsigned int[]) {
                                                44100, 48000, 88200, 96000
                                        },
-                                       .clock = 0x81,
+                                       .clock = 0x80,
                                }
                        },
                        /* MIDI */
index acc12f0..2c97185 100644 (file)
@@ -387,11 +387,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
                 * rules
                 */
                err = usb_driver_set_configuration(dev, 2);
-               if (err < 0) {
+               if (err < 0)
                        snd_printdd("error usb_driver_set_configuration: %d\n",
                                    err);
-                       return -ENODEV;
-               }
+               /* Always return an error, so that we stop creating a device
+                  that will just be destroyed and recreated with a new
+                  configuration */
+               return -ENODEV;
        } else
                snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
 
@@ -859,6 +861,17 @@ void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
        if ((le16_to_cpu(ep->chip->dev->descriptor.idVendor) == 0x23ba) &&
            ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
                ep->skip_packets = 4;
+
+       /*
+        * M-Audio Fast Track C400 - when packets are not skipped, real world
+        * latency varies by approx. +/- 50 frames (at 96KHz) each time the
+        * stream is (re)started. When skipping packets 16 at endpoint start
+        * up, the real world latency is stable within +/- 1 frame (also
+        * across power cycles).
+        */
+       if (ep->chip->usb_id == USB_ID(0x0763, 0x2030) &&
+           ep->type == SND_USB_ENDPOINT_TYPE_DATA)
+               ep->skip_packets = 16;
 }
 
 void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
index d25a469..c800ea4 100644 (file)
@@ -97,7 +97,7 @@ static struct utsname uts_buf;
  * The location of the interface configuration file.
  */
 
-#define KVP_CONFIG_LOC "/var/opt/"
+#define KVP_CONFIG_LOC "/var/lib/hyperv"
 
 #define MAX_FILE_NAME 100
 #define ENTRIES_PER_BLOCK 50
@@ -151,7 +151,7 @@ static void kvp_update_file(int pool)
         */
        kvp_acquire_lock(pool);
 
-       filep = fopen(kvp_file_info[pool].fname, "w");
+       filep = fopen(kvp_file_info[pool].fname, "we");
        if (!filep) {
                kvp_release_lock(pool);
                syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
@@ -182,7 +182,7 @@ static void kvp_update_mem_state(int pool)
 
        kvp_acquire_lock(pool);
 
-       filep = fopen(kvp_file_info[pool].fname, "r");
+       filep = fopen(kvp_file_info[pool].fname, "re");
        if (!filep) {
                kvp_release_lock(pool);
                syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
@@ -234,9 +234,9 @@ static int kvp_file_init(void)
        int i;
        int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
-       if (access("/var/opt/hyperv", F_OK)) {
-               if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) {
-                       syslog(LOG_ERR, " Failed to create /var/opt/hyperv");
+       if (access(KVP_CONFIG_LOC, F_OK)) {
+               if (mkdir(KVP_CONFIG_LOC, 0755 /* rwxr-xr-x */)) {
+                       syslog(LOG_ERR, " Failed to create %s", KVP_CONFIG_LOC);
                        exit(EXIT_FAILURE);
                }
        }
@@ -245,14 +245,14 @@ static int kvp_file_init(void)
                fname = kvp_file_info[i].fname;
                records_read = 0;
                num_blocks = 1;
-               sprintf(fname, "/var/opt/hyperv/.kvp_pool_%d", i);
-               fd = open(fname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR | S_IROTH);
+               sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
+               fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
                if (fd == -1)
                        return 1;
 
 
-               filep = fopen(fname, "r");
+               filep = fopen(fname, "re");
                if (!filep)
                        return 1;
 
@@ -1162,16 +1162,13 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
                                snprintf(str, sizeof(str), "%s", "DNS");
                                break;
                        }
-                       if (i != 0) {
-                               if (type != DNS) {
-                                       snprintf(sub_str, sizeof(sub_str),
-                                               "_%d", i++);
-                               } else {
-                                       snprintf(sub_str, sizeof(sub_str),
-                                               "%d", ++i);
-                               }
-                       } else if (type == DNS) {
+
+                       if (type == DNS) {
                                snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+                       } else if (type == GATEWAY && i == 0) {
+                               ++i;
+                       } else {
+                               snprintf(sub_str, sizeof(sub_str), "%d", i++);
                        }
 
 
@@ -1191,17 +1188,13 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
                                snprintf(str, sizeof(str), "%s",  "DNS");
                                break;
                        }
-                       if ((j != 0) || (type == DNS)) {
-                               if (type != DNS) {
-                                       snprintf(sub_str, sizeof(sub_str),
-                                               "_%d", j++);
-                               } else {
-                                       snprintf(sub_str, sizeof(sub_str),
-                                               "%d", ++i);
-                               }
-                       } else if (type == DNS) {
-                               snprintf(sub_str, sizeof(sub_str),
-                                       "%d", ++i);
+
+                       if (type == DNS) {
+                               snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+                       } else if (j == 0) {
+                               ++j;
+                       } else {
+                               snprintf(sub_str, sizeof(sub_str), "_%d", j++);
                        }
                } else {
                        return  HV_INVALIDARG;
@@ -1244,18 +1237,19 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
         * Here is the format of the ip configuration file:
         *
         * HWADDR=macaddr
-        * IF_NAME=interface name
-        * DHCP=yes (This is optional; if yes, DHCP is configured)
+        * DEVICE=interface name
+        * BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
+        *                       or "none" if no boot-time protocol should be used)
         *
-        * IPADDR=ipaddr1
-        * IPADDR_1=ipaddr2
-        * IPADDR_x=ipaddry (where y = x + 1)
+        * IPADDR0=ipaddr1
+        * IPADDR1=ipaddr2
+        * IPADDRx=ipaddry (where y = x + 1)
         *
-        * NETMASK=netmask1
-        * NETMASK_x=netmasky (where y = x + 1)
+        * NETMASK0=netmask1
+        * NETMASKx=netmasky (where y = x + 1)
         *
         * GATEWAY=ipaddr1
-        * GATEWAY_x=ipaddry (where y = x + 1)
+        * GATEWAYx=ipaddry (where y = x + 1)
         *
         * DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc)
         *
@@ -1271,7 +1265,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
         */
 
        snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC,
-               "hyperv/ifcfg-", if_name);
+               "/ifcfg-", if_name);
 
        file = fopen(if_file, "w");
 
@@ -1294,12 +1288,12 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
        if (error)
                goto setval_error;
 
-       error = kvp_write_file(file, "IF_NAME", "", if_name);
+       error = kvp_write_file(file, "DEVICE", "", if_name);
        if (error)
                goto setval_error;
 
        if (new_val->dhcp_enabled) {
-               error = kvp_write_file(file, "DHCP", "", "yes");
+               error = kvp_write_file(file, "BOOTPROTO", "", "dhcp");
                if (error)
                        goto setval_error;
 
@@ -1307,6 +1301,11 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
                 * We are done!.
                 */
                goto setval_done;
+
+       } else {
+               error = kvp_write_file(file, "BOOTPROTO", "", "none");
+               if (error)
+                       goto setval_error;
        }
 
        /*
index 3e9427e..735aafd 100755 (executable)
 # Here is the format of the ip configuration file:
 #
 # HWADDR=macaddr
-# IF_NAME=interface name
-# DHCP=yes (This is optional; if yes, DHCP is configured)
+# DEVICE=interface name
+# BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
+#                       or "none" if no boot-time protocol should be used)
 #
-# IPADDR=ipaddr1
-# IPADDR_1=ipaddr2
-# IPADDR_x=ipaddry (where y = x + 1)
+# IPADDR0=ipaddr1
+# IPADDR1=ipaddr2
+# IPADDRx=ipaddry (where y = x + 1)
 #
-# NETMASK=netmask1
-# NETMASK_x=netmasky (where y = x + 1)
+# NETMASK0=netmask1
+# NETMASKx=netmasky (where y = x + 1)
 #
 # GATEWAY=ipaddr1
-# GATEWAY_x=ipaddry (where y = x + 1)
+# GATEWAYx=ipaddry (where y = x + 1)
 #
 # DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc)
 #
@@ -53,11 +54,6 @@ echo "NM_CONTROLLED=no" >> $1
 echo "PEERDNS=yes" >> $1
 echo "ONBOOT=yes" >> $1
 
-dhcp=$(grep "DHCP" $1 2>/dev/null)
-if [ "$dhcp" != "" ];
-then
-echo "BOOTPROTO=dhcp" >> $1;
-fi
 
 cp $1 /etc/sysconfig/network-scripts/
 
@@ -65,4 +61,4 @@ cp $1 /etc/sysconfig/network-scripts/
 interface=$(echo $1 | awk -F - '{ print $2 }')
 
 /sbin/ifdown $interface 2>/dev/null
-/sbin/ifup $interfac 2>/dev/null
+/sbin/ifup $interface 2>/dev/null
index 80db3f4..39d4106 100644 (file)
@@ -11,11 +11,21 @@ lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
 arch/*/include/asm/perf_regs.h
+arch/*/include/uapi/asm/unistd*.h
+arch/*/include/uapi/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/magic.h
 include/linux/hw_breakpoint.h
+include/linux/rbtree_augmented.h
+include/uapi/linux/perf_event.h
+include/uapi/linux/const.h
+include/uapi/linux/swab.h
+include/uapi/linux/hw_breakpoint.h
 arch/x86/include/asm/svm.h
 arch/x86/include/asm/vmx.h
 arch/x86/include/asm/kvm_host.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/x86/include/uapi/asm/kvm.h
index 891bc77..8ab05e5 100644 (file)
@@ -58,7 +58,7 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                  -e s/arm.*/arm/ -e s/sa110/arm/ \
                                  -e s/s390x/s390/ -e s/parisc64/parisc/ \
                                  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-                                 -e s/sh[234].*/sh/ )
+                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
 NO_PERF_REGS := 1
 
 CC = $(CROSS_COMPILE)gcc